getopts collections test rand \
core alloc \
rustc_unicode rustc_bitflags \
- alloc_system alloc_jemalloc
+ alloc_system alloc_jemalloc \
+ panic_abort panic_unwind unwind
RUSTC_CRATES := rustc rustc_typeck rustc_mir rustc_borrowck rustc_resolve rustc_driver \
rustc_trans rustc_back rustc_llvm rustc_privacy rustc_lint \
rustc_data_structures rustc_platform_intrinsics \
DEPS_rand := core
DEPS_rustc_bitflags := core
DEPS_rustc_unicode := core
+DEPS_panic_abort := libc alloc
+DEPS_panic_unwind := libc alloc unwind
+DEPS_unwind := libc
+
+# FIXME(stage0): change this to just `RUSTFLAGS_panic_abort := ...`
+RUSTFLAGS1_panic_abort := -C panic=abort
+RUSTFLAGS2_panic_abort := -C panic=abort
+RUSTFLAGS3_panic_abort := -C panic=abort
DEPS_std := core libc rand alloc collections rustc_unicode \
native:backtrace \
- alloc_system
+ alloc_system panic_abort panic_unwind unwind
DEPS_arena := std
DEPS_glob := std
DEPS_flate := std native:miniz
ONLY_RLIB_rustc_bitflags := 1
ONLY_RLIB_alloc_system := 1
ONLY_RLIB_alloc_jemalloc := 1
+ONLY_RLIB_panic_unwind := 1
+ONLY_RLIB_panic_abort := 1
+ONLY_RLIB_unwind := 1
TARGET_SPECIFIC_alloc_jemalloc := 1
$(eval $(call RUST_CRATE,collectionstest))
TEST_TARGET_CRATES = $(filter-out core rustc_unicode alloc_system libc \
- alloc_jemalloc,$(TARGET_CRATES)) \
+ alloc_jemalloc panic_unwind \
+ panic_abort,$(TARGET_CRATES)) \
collectionstest coretest
TEST_DOC_CRATES = $(DOC_CRATES) arena flate fmt_macros getopts graphviz \
log rand rbml serialize syntax term test
} else {
env::var_os("RUSTC_REAL").unwrap()
};
+ let stage = env::var("RUSTC_STAGE").unwrap();
let mut cmd = Command::new(rustc);
cmd.args(&args)
- .arg("--cfg").arg(format!("stage{}", env::var("RUSTC_STAGE").unwrap()));
+ .arg("--cfg").arg(format!("stage{}", stage));
if let Some(target) = target {
// The stage0 compiler has a special sysroot distinct from what we
cmd.args(&s.split(" ").filter(|s| !s.is_empty()).collect::<Vec<_>>());
}
+ // If we're compiling specifically the `panic_abort` crate then we pass
+ // the `-C panic=abort` option. Note that we do not do this for any
+ // other crate intentionally as this is the only crate for now that we
+ // ship with panic=abort.
+ //
+ // This... is a bit of a hack how we detect this. Ideally this
+ // information should be encoded in the crate I guess? Would likely
+ // require an RFC amendment to RFC 1513, however.
+ let is_panic_abort = args.windows(2).any(|a| {
+ &*a[0] == "--crate-name" && &*a[1] == "panic_abort"
+ });
+ // FIXME(stage0): remove this `stage != "0"` condition
+ if is_panic_abort && stage != "0" {
+ cmd.arg("-C").arg("panic=abort");
+ }
+
// Set various options from config.toml to configure how we're building
// code.
if env::var("RUSTC_DEBUGINFO") == Ok("true".to_string()) {
describing it. The short story is that Getopts generates an argument
parser and a help message from a vector of options (The fact that it
is a vector is hidden behind a struct and a set of methods). Once the
-parsing is done, we can decode the program arguments into a Rust
-struct. From there, we can get information about the flags, for
+parsing is done, the parser returns a struct that records matches
+for defined options, and remaining "free" arguments.
+From there, we can get information about the flags, for
instance, whether they were passed in, and what arguments they
had. Here's our program with the appropriate `extern crate`
statements, and the basic argument setup for Getopts:
print_usage(&program, opts);
return;
}
- let data_path = &args[1];
- let city = &args[2];
+ let data_path = &matches.free[0];
+ let city: &str = &matches.free[1];
// Do stuff with information
}
return;
}
- let data_path = &args[1];
- let city: &str = &args[2];
+ let data_path = &matches.free[0];
+ let city: &str = &matches.free[1];
let file = File::open(data_path).unwrap();
let mut rdr = csv::Reader::from_reader(file);
Ok(m) => { m }
Err(e) => { panic!(e.to_string()) }
};
+
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
- let data_path = &args[1];
- let city = &args[2];
+ let data_path = &matches.free[0];
+ let city: &str = &matches.free[1];
+
for pop in search(data_path, city) {
println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
}
```rust,ignore
...
-match search(&data_file, &city) {
- Ok(pops) => {
- for pop in pops {
- println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+ match search(data_path, city) {
+ Ok(pops) => {
+ for pop in pops {
+ println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+ }
}
+ Err(err) => println!("{}", err)
}
- Err(err) => println!("{}", err)
-}
...
```
println!("{}", opts.usage(&format!("Usage: {} [options] <city>", program)));
}
```
-The next part is going to be only a little harder:
+Of course we need to adapt the argument handling code:
```rust,ignore
...
-let mut opts = Options::new();
-opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME");
-opts.optflag("h", "help", "Show this usage message.");
-...
-let file = matches.opt_str("f");
-let data_file = &file.as_ref().map(Path::new);
-
-let city = if !matches.free.is_empty() {
- &matches.free[0]
-} else {
- print_usage(&program, opts);
- return;
-};
-
-match search(data_file, city) {
- Ok(pops) => {
- for pop in pops {
- println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+ let mut opts = Options::new();
+ opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME");
+ opts.optflag("h", "help", "Show this usage message.");
+ ...
+ let data_path = matches.opt_str("f");
+
+ let city = if !matches.free.is_empty() {
+ &matches.free[0]
+ } else {
+ print_usage(&program, opts);
+ return;
+ };
+
+ match search(&data_path, city) {
+ Ok(pops) => {
+ for pop in pops {
+ println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+ }
}
+ Err(err) => println!("{}", err)
}
- Err(err) => println!("{}", err)
-}
...
```
-In this piece of code, we take `file` (which has the type
-`Option<String>`), and convert it to a type that `search` can use, in
-this case, `&Option<AsRef<Path>>`. To do this, we take a reference of
-file, and map `Path::new` onto it. In this case, `as_ref()` converts
-the `Option<String>` into an `Option<&str>`, and from there, we can
-execute `Path::new` to the content of the optional, and return the
-optional of the new value. Once we have that, it is a simple matter of
-getting the `city` argument and executing `search`.
+We've made the user experience a bit nicer by showing the usage message,
+instead of a panic from an out-of-bounds index, when `city`, the
+remaining free argument, is not present.
Modifying `search` is slightly trickier. The `csv` crate can build a
parser out of
And now for impls on `Display` and `Error`:
```rust,ignore
+use std::fmt;
+
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
}
}
- fn cause(&self) -> Option<&error::Error> {
- match *self {
+ fn cause(&self) -> Option<&Error> {
+ match *self {
CliError::Io(ref err) => Some(err),
- CliError::Parse(ref err) => Some(err),
- // Our custom error doesn't have an underlying cause, but we could
- // modify it so that it does.
- CliError::NotFound() => None,
+ CliError::Csv(ref err) => Some(err),
+ // Our custom error doesn't have an underlying cause,
+ // but we could modify it so that it does.
+ CliError::NotFound => None,
}
}
}
```rust,ignore
...
-let mut opts = Options::new();
-opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME");
-opts.optflag("h", "help", "Show this usage message.");
-opts.optflag("q", "quiet", "Silences errors and warnings.");
+ let mut opts = Options::new();
+ opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME");
+ opts.optflag("h", "help", "Show this usage message.");
+ opts.optflag("q", "quiet", "Silences errors and warnings.");
...
```
tweak the case analysis in `main`:
```rust,ignore
-match search(&args.arg_data_path, &args.arg_city) {
- Err(CliError::NotFound) if args.flag_quiet => process::exit(1),
- Err(err) => panic!("{}", err),
- Ok(pops) => for pop in pops {
- println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+use std::process;
+...
+ match search(&data_path, city) {
+ Err(CliError::NotFound) if matches.opt_present("q") => process::exit(1),
+ Err(err) => panic!("{}", err),
+ Ok(pops) => for pop in pops {
+ println!("{}, {}: {:?}", pop.city, pop.country, pop.count);
+ }
}
-}
+...
```
Certainly, we don't want to be quiet if there was an IO error or if the data
let s = "foo
bar";
-assert_eq!("foo\n bar", s);
+assert_eq!("foo\n bar", s);
```
The second, with a `\`, trims the spaces and the newline:
form or name",
issue = "27783")]
#![feature(allocator)]
-#![feature(libc)]
#![feature(staged_api)]
-
-extern crate libc;
+#![cfg_attr(unix, feature(libc))]
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
#[cfg(unix)]
mod imp {
+ extern crate libc;
+
use core::cmp;
use core::ptr;
- use libc;
use MIN_ALIGN;
pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
///
/// # Examples
///
+/// Basic usage:
+///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!".to_string());
/// ```
+///
+/// Please note that using [`format!`][format!] might be preferable.
+/// Example:
+///
+/// ```
+/// let s = format!("Hello, {}!", "world");
+/// assert_eq!(s, "Hello, world!".to_string());
+/// ```
+///
+/// [format!]: ../macro.format!.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments) -> string::String {
let mut output = string::String::new();
String::from("ศไทย中华Việt Nam"));
let xs = b"hello\xFF".to_vec();
- let err = String::from_utf8(xs).err().unwrap();
+ let err = String::from_utf8(xs).unwrap_err();
assert_eq!(err.into_bytes(), b"hello\xff".to_vec());
}
///
/// * output - the buffer to write output to
/// * args - the precompiled arguments generated by `format_args!`
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let mut output = String::new();
+/// fmt::write(&mut output, format_args!("Hello {}!", "world"))
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// Please note that using [`write!`][write_macro] might be preferable. Example:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut output = String::new();
+/// write!(&mut output, "Hello {}!", "world")
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// [write_macro]: ../../std/macro.write!.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write(output: &mut Write, args: Arguments) -> Result {
let mut formatter = Formatter {
--- /dev/null
+[package]
+authors = ["The Rust Project Developers"]
+name = "panic_abort"
+version = "0.0.0"
+
+[lib]
+path = "lib.rs"
+
+[dependencies]
+core = { path = "../libcore" }
+libc = { path = "../rustc/libc_shim" }
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of Rust panics via process aborts
+//!
+//! When compared to the implementation via unwinding, this crate is *much*
+//! simpler! That being said, it's not quite as versatile, but here goes!
+
+#![no_std]
+#![crate_name = "panic_abort"]
+#![crate_type = "rlib"]
+#![unstable(feature = "panic_abort", issue = "32837")]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+ html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+ html_root_url = "https://doc.rust-lang.org/nightly/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(staged_api)]
+
+#![cfg_attr(not(stage0), panic_runtime)]
+#![cfg_attr(not(stage0), feature(panic_runtime))]
+#![cfg_attr(unix, feature(libc))]
+#![cfg_attr(windows, feature(core_intrinsics))]
+
+// Rust's "try" function, but if we're aborting on panics we just call the
+// function as there's nothing else we need to do here.
+#[no_mangle]
+pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8),
+                                              data: *mut u8,
+                                              _data_ptr: *mut usize,
+                                              _vtable_ptr: *mut usize) -> u32 {
+    // With `-C panic=abort` a panic can never be caught, so there is no
+    // try/catch machinery to set up: just invoke the function.
+    f(data);
+    // 0 signals to the caller that no panic was caught.
+    0
+}
+
+// "Leak" the payload and shim to the relevant abort on the platform in
+// question.
+//
+// For Unix we just use `abort` from libc as it'll trigger debuggers, core
+// dumps, etc, as one might expect. On Windows, however, the best option we have
+// is the `__fastfail` intrinsics, but that's unfortunately not defined in LLVM,
+// and the `RaiseFailFastException` function isn't available until Windows 7
+// which would break compat with XP. For now just use `intrinsics::abort` which
+// will kill us with an illegal instruction, which will do a good enough job for
+// now hopefully.
+#[no_mangle]
+pub unsafe extern fn __rust_start_panic(_data: usize, _vtable: usize) -> u32 {
+    // The payload (`_data`/`_vtable`) is intentionally leaked (see comment
+    // above): we are about to kill the process, so no cleanup is performed.
+    return abort();
+
+    #[cfg(unix)]
+    unsafe fn abort() -> ! {
+        extern crate libc;
+        libc::abort();
+    }
+
+    #[cfg(windows)]
+    unsafe fn abort() -> ! {
+        // No usable `__fastfail`/`RaiseFailFastException` here (see comment
+        // above), so die via an illegal instruction instead.
+        core::intrinsics::abort();
+    }
+}
+
+// This... is a bit of an oddity. The tl;dr; is that this is required to link
+// correctly, the longer explanation is below.
+//
+// Right now the binaries of libcore/libstd that we ship are all compiled with
+// `-C panic=unwind`. This is done to ensure that the binaries are maximally
+// compatible with as many situations as possible. The compiler, however,
+// requires a "personality function" for all functions compiled with `-C
+// panic=unwind`. This personality function is hardcoded to the symbol
+// `rust_eh_personality` and is defined by the `eh_personality` lang item.
+//
+// So... why not just define that lang item here? Good question! The way that
+// panic runtimes are linked in is actually a little subtle in that they're
+// "sort of" in the compiler's crate store, but only actually linked if another
+// isn't actually linked. This ends up meaning that both this crate and the
+// panic_unwind crate can appear in the compiler's crate store, and if both
+// define the `eh_personality` lang item then that'll hit an error.
+//
+// To handle this the compiler only requires the `eh_personality` is defined if
+// the panic runtime being linked in is the unwinding runtime, and otherwise
+// it's not required to be defined (rightfully so). In this case, however, this
+// library just defines this symbol so there's at least some personality
+// somewhere.
+//
+// Essentially this symbol is just defined to get wired up to libcore/libstd
+// binaries, but it should never be called as we don't link in an unwinding
+// runtime at all.
+#[cfg(not(stage0))]
+pub mod personalities {
+
+ #[no_mangle]
+ #[cfg(not(all(target_os = "windows",
+ target_env = "gnu",
+ target_arch = "x86_64")))]
+ pub extern fn rust_eh_personality() {}
+
+ // On x86_64-pc-windows-gnu we use our own personality function that needs
+ // to return `ExceptionContinueSearch` as we're passing on all our frames.
+ #[no_mangle]
+ #[cfg(all(target_os = "windows",
+ target_env = "gnu",
+ target_arch = "x86_64"))]
+ pub extern fn rust_eh_personality(_record: usize,
+ _frame: usize,
+ _context: usize,
+ _dispatcher: usize) -> u32 {
+ 1 // `ExceptionContinueSearch`
+ }
+
+ // Similar to above, this corresponds to the `eh_unwind_resume` lang item
+ // that's only used on Windows currently.
+ //
+    // Note that we don't execute landing pads, so this is never called; its
+    // body is empty.
+ #[no_mangle]
+ #[cfg(all(target_os = "windows", target_env = "gnu"))]
+ pub extern fn rust_eh_unwind_resume() {}
+
+ // These two are called by our startup objects on i686-pc-windows-gnu, but
+ // they don't need to do anything so the bodies are nops.
+ #[no_mangle]
+ #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
+ pub extern fn rust_eh_register_frames() {}
+ #[no_mangle]
+ #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
+ pub extern fn rust_eh_unregister_frames() {}
+}
--- /dev/null
+[root]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc 0.0.0",
+ "core 0.0.0",
+ "libc 0.0.0",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "core 0.0.0",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "libc"
+version = "0.0.0"
+dependencies = [
+ "core 0.0.0",
+]
+
--- /dev/null
+[package]
+authors = ["The Rust Project Developers"]
+name = "panic_unwind"
+version = "0.0.0"
+
+[lib]
+path = "lib.rs"
+
+[dependencies]
+alloc = { path = "../liballoc" }
+core = { path = "../libcore" }
+libc = { path = "../rustc/libc_shim" }
+unwind = { path = "../libunwind" }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Parsing of GCC-style Language-Specific Data Area (LSDA)
+//! For details see:
+//! http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+//! http://mentorembedded.github.io/cxx-abi/exceptions.pdf
+//! http://www.airs.com/blog/archives/460
+//! http://www.airs.com/blog/archives/464
+//!
+//! A reference implementation may be found in the GCC source tree
+//! (<root>/libgcc/unwind-c.c as of this writing)
+
+#![allow(non_upper_case_globals)]
+#![allow(unused)]
+
+use dwarf::DwarfReader;
+use core::mem;
+
+pub const DW_EH_PE_omit : u8 = 0xFF;
+pub const DW_EH_PE_absptr : u8 = 0x00;
+
+pub const DW_EH_PE_uleb128 : u8 = 0x01;
+pub const DW_EH_PE_udata2 : u8 = 0x02;
+pub const DW_EH_PE_udata4 : u8 = 0x03;
+pub const DW_EH_PE_udata8 : u8 = 0x04;
+pub const DW_EH_PE_sleb128 : u8 = 0x09;
+pub const DW_EH_PE_sdata2 : u8 = 0x0A;
+pub const DW_EH_PE_sdata4 : u8 = 0x0B;
+pub const DW_EH_PE_sdata8 : u8 = 0x0C;
+
+pub const DW_EH_PE_pcrel : u8 = 0x10;
+pub const DW_EH_PE_textrel : u8 = 0x20;
+pub const DW_EH_PE_datarel : u8 = 0x30;
+pub const DW_EH_PE_funcrel : u8 = 0x40;
+pub const DW_EH_PE_aligned : u8 = 0x50;
+
+pub const DW_EH_PE_indirect : u8 = 0x80;
+
+#[derive(Copy, Clone)]
+pub struct EHContext {
+ pub ip: usize, // Current instruction pointer
+ pub func_start: usize, // Address of the current function
+ pub text_start: usize, // Address of the code section
+ pub data_start: usize, // Address of the data section
+}
+
+// Scans the LSDA's call-site table for the entry covering `context.ip` and
+// returns the absolute landing-pad address, or `None` when no landing pad
+// applies (no LSDA, zero landing-pad offset, or IP range not found).
+pub unsafe fn find_landing_pad(lsda: *const u8, context: &EHContext)
+                               -> Option<usize> {
+    if lsda.is_null() {
+        return None;
+    }
+
+    let func_start = context.func_start;
+    let mut reader = DwarfReader::new(lsda);
+
+    let start_encoding = reader.read::<u8>();
+    // base address for landing pad offsets
+    let lpad_base = if start_encoding != DW_EH_PE_omit {
+        read_encoded_pointer(&mut reader, context, start_encoding)
+    } else {
+        func_start
+    };
+
+    let ttype_encoding = reader.read::<u8>();
+    if ttype_encoding != DW_EH_PE_omit {
+        // Rust doesn't analyze exception types, so we don't care about the type table
+        reader.read_uleb128();
+    }
+
+    let call_site_encoding = reader.read::<u8>();
+    let call_site_table_length = reader.read_uleb128();
+    let action_table = reader.ptr.offset(call_site_table_length as isize);
+    // Return addresses point 1 byte past the call instruction, which could
+    // be in the next IP range.
+    let ip = context.ip-1;
+
+    while reader.ptr < action_table {
+        let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding);
+        let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding);
+        let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding);
+        // The action-table entry is read to advance the stream but otherwise
+        // unused here.
+        let cs_action = reader.read_uleb128();
+        // Callsite table is sorted by cs_start, so if we've passed the ip, we
+        // may stop searching.
+        if ip < func_start + cs_start {
+            break
+        }
+        if ip < func_start + cs_start + cs_len {
+            if cs_lpad != 0 {
+                return Some(lpad_base + cs_lpad);
+            } else {
+                return None;
+            }
+        }
+    }
+    // IP range not found: gcc's C++ personality calls terminate() here,
+    // however the rest of the languages treat this the same as cs_lpad == 0.
+    // We follow suit.
+    None
+}
+
+// Rounds `unrounded` up to the nearest multiple of `align`.
+#[inline]
+fn round_up(unrounded: usize, align: usize) -> usize {
+    // `align` must be a power of two for the mask arithmetic below to work.
+    assert!(align.is_power_of_two());
+    (unrounded + align - 1) & !(align - 1)
+}
+
+// Decodes one DWARF-encoded pointer from `reader`: the low nibble of
+// `encoding` selects the storage format of the raw value, the high nibble
+// selects what the value is relative to.
+unsafe fn read_encoded_pointer(reader: &mut DwarfReader,
+                               context: &EHContext,
+                               encoding: u8) -> usize {
+    assert!(encoding != DW_EH_PE_omit);
+
+    // DW_EH_PE_aligned implies it's an absolute pointer value
+    if encoding == DW_EH_PE_aligned {
+        reader.ptr = round_up(reader.ptr as usize,
+                              mem::size_of::<usize>()) as *const u8;
+        return reader.read::<usize>();
+    }
+
+    // Low nibble: raw value format.
+    let mut result = match encoding & 0x0F {
+        DW_EH_PE_absptr => reader.read::<usize>(),
+        DW_EH_PE_uleb128 => reader.read_uleb128() as usize,
+        DW_EH_PE_udata2 => reader.read::<u16>() as usize,
+        DW_EH_PE_udata4 => reader.read::<u32>() as usize,
+        DW_EH_PE_udata8 => reader.read::<u64>() as usize,
+        DW_EH_PE_sleb128 => reader.read_sleb128() as usize,
+        DW_EH_PE_sdata2 => reader.read::<i16>() as usize,
+        DW_EH_PE_sdata4 => reader.read::<i32>() as usize,
+        DW_EH_PE_sdata8 => reader.read::<i64>() as usize,
+        _ => panic!()
+    };
+
+    // High nibble: base the value is relative to.
+    result += match encoding & 0x70 {
+        DW_EH_PE_absptr => 0,
+        // relative to address of the encoded value, despite the name
+        DW_EH_PE_pcrel => reader.ptr as usize,
+        DW_EH_PE_textrel => { assert!(context.text_start != 0);
+                              context.text_start },
+        DW_EH_PE_datarel => { assert!(context.data_start != 0);
+                              context.data_start },
+        DW_EH_PE_funcrel => { assert!(context.func_start != 0);
+                              context.func_start },
+        _ => panic!()
+    };
+
+    // DW_EH_PE_indirect: the decoded value is the address of the actual
+    // pointer, so dereference once more.
+    if encoding & DW_EH_PE_indirect != 0 {
+        result = *(result as *const usize);
+    }
+
+    result
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Utilities for parsing DWARF-encoded data streams.
+//! See http://www.dwarfstd.org,
+//! DWARF-4 standard, Section 7 - "Data Representation"
+
+// This module is used only by x86_64-pc-windows-gnu for now, but we
+// are compiling it everywhere to avoid regressions.
+#![allow(unused)]
+
+pub mod eh;
+
+use core::mem;
+
+pub struct DwarfReader {
+ pub ptr : *const u8
+}
+
+#[repr(C,packed)]
+struct Unaligned<T>(T);
+
+impl DwarfReader {
+
+    pub fn new(ptr : *const u8) -> DwarfReader {
+        DwarfReader {
+            ptr : ptr
+        }
+    }
+
+    // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned
+    // on a 4-byte boundary. This may cause problems on platforms with strict
+    // alignment requirements. By wrapping data in a "packed" struct, we are
+    // telling the backend to generate "misalignment-safe" code.
+    pub unsafe fn read<T:Copy>(&mut self) -> T {
+        let Unaligned(result) = *(self.ptr as *const Unaligned<T>);
+        self.ptr = self.ptr.offset(mem::size_of::<T>() as isize);
+        result
+    }
+
+    // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable
+    // Length Data".
+    pub unsafe fn read_uleb128(&mut self) -> u64 {
+        let mut shift : usize = 0;
+        let mut result : u64 = 0;
+        let mut byte : u8;
+        loop {
+            byte = self.read::<u8>();
+            // Each byte contributes its low 7 bits, least-significant first.
+            result |= ((byte & 0x7F) as u64) << shift;
+            shift += 7;
+            // A clear high bit marks the final byte of the encoding.
+            if byte & 0x80 == 0 {
+                break;
+            }
+        }
+        result
+    }
+
+    pub unsafe fn read_sleb128(&mut self) -> i64 {
+        let mut shift : usize = 0;
+        let mut result : u64 = 0;
+        let mut byte : u8;
+        loop {
+            byte = self.read::<u8>();
+            result |= ((byte & 0x7F) as u64) << shift;
+            shift += 7;
+            if byte & 0x80 == 0 {
+                break;
+            }
+        }
+        // sign-extend: bit 6 of the last byte is the sign bit.
+        if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
+            result |= (!0 as u64) << shift;
+        }
+        result as i64
+    }
+}
+
+#[test]
+fn dwarf_reader() {
+ let encoded: &[u8] = &[1,
+ 2, 3,
+ 4, 5, 6, 7,
+ 0xE5, 0x8E, 0x26,
+ 0x9B, 0xF1, 0x59,
+ 0xFF, 0xFF];
+
+ let mut reader = DwarfReader::new(encoded.as_ptr());
+
+ unsafe {
+ assert!(reader.read::<u8>() == u8::to_be(1u8));
+ assert!(reader.read::<u16>() == u16::to_be(0x0203));
+ assert!(reader.read::<u32>() == u32::to_be(0x04050607));
+
+ assert!(reader.read_uleb128() == 624485);
+ assert!(reader.read_sleb128() == -624485);
+
+ assert!(reader.read::<i8>() == i8::to_be(-1));
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of panics backed by libgcc/libunwind (in some form)
+//!
+//! For background on exception handling and stack unwinding please see
+//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
+//! documents linked from it.
+//! These are also good reads:
+//! http://mentorembedded.github.io/cxx-abi/abi-eh.html
+//! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
+//! http://www.airs.com/blog/index.php?s=exception+frames
+//!
+//! ## A brief summary
+//!
+//! Exception handling happens in two phases: a search phase and a cleanup
+//! phase.
+//!
+//! In both phases the unwinder walks stack frames from top to bottom using
+//! information from the stack frame unwind sections of the current process's
+//! modules ("module" here refers to an OS module, i.e. an executable or a
+//! dynamic library).
+//!
+//! For each stack frame, it invokes the associated "personality routine", whose
+//! address is also stored in the unwind info section.
+//!
+//! In the search phase, the job of a personality routine is to examine
+//! exception object being thrown, and to decide whether it should be caught at
+//! that stack frame. Once the handler frame has been identified, cleanup phase
+//! begins.
+//!
+//! In the cleanup phase, the unwinder invokes each personality routine again.
+//! This time it decides which (if any) cleanup code needs to be run for
+//! the current stack frame. If so, the control is transferred to a special
+//! branch in the function body, the "landing pad", which invokes destructors,
+//! frees memory, etc. At the end of the landing pad, control is transferred
+//! back to the unwinder and unwinding resumes.
+//!
+//! Once stack has been unwound down to the handler frame level, unwinding stops
+//! and the last personality routine transfers control to the catch block.
+//!
+//! ## `eh_personality` and `eh_unwind_resume`
+//!
+//! These language items are used by the compiler when generating unwind info.
+//! The first one is the personality routine described above. The second one
+//! allows compilation target to customize the process of resuming unwind at the
+//! end of the landing pads. `eh_unwind_resume` is used only if
+//! `custom_unwind_resume` flag in the target options is set.
+
+#![allow(private_no_mangle_fns)]
+
+use core::any::Any;
+use alloc::boxed::Box;
+
+use unwind as uw;
+
+#[repr(C)]
+struct Exception {
+ _uwe: uw::_Unwind_Exception,
+ cause: Option<Box<Any + Send>>,
+}
+
+// Boxes the panic payload inside a `_Unwind_Exception` header and starts
+// unwinding via the system unwinder. Only returns (with the unwinder's error
+// code) if `_Unwind_RaiseException` itself fails.
+pub unsafe fn panic(data: Box<Any + Send>) -> u32 {
+    let exception = Box::new(Exception {
+        _uwe: uw::_Unwind_Exception {
+            exception_class: rust_exception_class(),
+            exception_cleanup: exception_cleanup,
+            private: [0; uw::unwinder_private_data_size],
+        },
+        cause: Some(data),
+    });
+    // Ownership of the box is handed to the unwinder; `exception_cleanup`
+    // reclaims it if the exception is destroyed without being caught by us.
+    let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception;
+    return uw::_Unwind_RaiseException(exception_param) as u32;
+
+    extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
+                                exception: *mut uw::_Unwind_Exception) {
+        unsafe {
+            // Rebox to drop the Exception (and its payload).
+            let _: Box<Exception> = Box::from_raw(exception as *mut Exception);
+        }
+    }
+}
+
+pub fn payload() -> *mut u8 {
+    // NOTE(review): always returns null; callers are outside this chunk, so
+    // presumably the real payload pointer comes from `cleanup` — confirm
+    // against the libstd side of this runtime interface.
+    0 as *mut u8
+}
+
+// Recovers the boxed panic payload from a caught exception object and tells
+// the unwinder to delete its exception allocation.
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send> {
+    let my_ep = ptr as *mut Exception;
+    // Take the cause out before the exception object is deleted.
+    let cause = (*my_ep).cause.take();
+    uw::_Unwind_DeleteException(ptr as *mut _);
+    cause.unwrap()
+}
+
+// Rust's exception class identifier. This is used by personality routines to
+// determine whether the exception was thrown by their own runtime.
+fn rust_exception_class() -> uw::_Unwind_Exception_Class {
+ // M O Z \0 R U S T -- vendor, language
+ 0x4d4f5a_00_52555354
+}
+
+// We could implement our personality routine in Rust, however exception
+// info decoding is tedious. More importantly, personality routines have to
+// handle various platform quirks, which are not fun to maintain. For this
+// reason, we attempt to reuse personality routine of the C language:
+// __gcc_personality_v0.
+//
+// Since C does not support exception catching, __gcc_personality_v0 simply
+// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
+// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
+//
+// This is pretty close to Rust's exception handling approach, except that Rust
+// does have a single "catch-all" handler at the bottom of each thread's stack.
+// So we have two versions of the personality routine:
+// - rust_eh_personality, used by all cleanup landing pads, which never catches,
+// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
+// - rust_eh_personality_catch, used only by rust_try(), which always catches.
+//
+// See also: rustc_trans::trans::intrinsic::trans_gnu_try
+
+#[cfg(all(not(target_arch = "arm"),
+ not(all(windows, target_arch = "x86_64"))))]
+pub mod eabi {
+ use unwind as uw;
+ use libc::c_int;
+
+ extern {
+ fn __gcc_personality_v0(version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code;
+ }
+
+ #[lang = "eh_personality"]
+ #[no_mangle]
+ extern fn rust_eh_personality(
+ version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context
+ ) -> uw::_Unwind_Reason_Code
+ {
+ unsafe {
+ __gcc_personality_v0(version, actions, exception_class, ue_header,
+ context)
+ }
+ }
+
+ #[lang = "eh_personality_catch"]
+ #[no_mangle]
+ pub extern fn rust_eh_personality_catch(
+ version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context
+ ) -> uw::_Unwind_Reason_Code
+ {
+
+ if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+ uw::_URC_HANDLER_FOUND // catch!
+ }
+ else { // cleanup phase
+ unsafe {
+ __gcc_personality_v0(version, actions, exception_class, ue_header,
+ context)
+ }
+ }
+ }
+}
+
+// iOS on armv7 uses SjLj (setjmp/longjmp) exceptions and therefore requires
+// a specialized personality routine: __gcc_personality_sj0
+
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+pub mod eabi {
+    use unwind as uw;
+    use libc::c_int;
+
+    extern {
+        fn __gcc_personality_sj0(version: c_int,
+                                 actions: uw::_Unwind_Action,
+                                 exception_class: uw::_Unwind_Exception_Class,
+                                 ue_header: *mut uw::_Unwind_Exception,
+                                 context: *mut uw::_Unwind_Context)
+                                 -> uw::_Unwind_Reason_Code;
+    }
+
+    // Cleanup-only personality routine; mirrors the non-iOS module above but
+    // delegates to the SjLj variant of the libgcc routine.
+    #[lang = "eh_personality"]
+    #[no_mangle]
+    pub extern fn rust_eh_personality(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_sj0(version, actions, exception_class, ue_header,
+                                  context)
+        }
+    }
+
+    // rust_try()'s personality: always catches during the search phase,
+    // otherwise defers to __gcc_personality_sj0 for cleanup.
+    #[lang = "eh_personality_catch"]
+    #[no_mangle]
+    pub extern fn rust_eh_personality_catch(
+        version: c_int,
+        actions: uw::_Unwind_Action,
+        exception_class: uw::_Unwind_Exception_Class,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            unsafe {
+                __gcc_personality_sj0(version, actions, exception_class, ue_header,
+                                      context)
+            }
+        }
+    }
+}
+
+
+// ARM EHABI uses a slightly different personality routine signature
+// (a single `state` word instead of version/actions/class arguments),
+// but otherwise works the same.
+#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
+pub mod eabi {
+    use unwind as uw;
+    use libc::c_int;
+
+    extern {
+        fn __gcc_personality_v0(state: uw::_Unwind_State,
+                                ue_header: *mut uw::_Unwind_Exception,
+                                context: *mut uw::_Unwind_Context)
+                                -> uw::_Unwind_Reason_Code;
+    }
+
+    // Cleanup-only personality routine; defers everything to libgcc.
+    #[lang = "eh_personality"]
+    #[no_mangle]
+    extern fn rust_eh_personality(
+        state: uw::_Unwind_State,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        unsafe {
+            __gcc_personality_v0(state, ue_header, context)
+        }
+    }
+
+    // rust_try()'s personality: claim the frame during a genuine search
+    // phase, but let forced unwinds (e.g. backtrace capture) continue past.
+    #[lang = "eh_personality_catch"]
+    #[no_mangle]
+    pub extern fn rust_eh_personality_catch(
+        state: uw::_Unwind_State,
+        ue_header: *mut uw::_Unwind_Exception,
+        context: *mut uw::_Unwind_Context
+    ) -> uw::_Unwind_Reason_Code
+    {
+        // Backtraces on ARM will call the personality routine with
+        // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
+        // we want to continue unwinding the stack, otherwise all our backtraces
+        // would end at __rust_try.
+        if (state as c_int & uw::_US_ACTION_MASK as c_int)
+            == uw::_US_VIRTUAL_UNWIND_FRAME as c_int
+            && (state as c_int & uw::_US_FORCE_UNWIND as c_int) == 0 { // search phase
+            uw::_URC_HANDLER_FOUND // catch!
+        }
+        else { // cleanup phase
+            unsafe {
+                __gcc_personality_v0(state, ue_header, context)
+            }
+        }
+    }
+}
+
+// See docs in the `unwind` module.
+//
+// Thin shim over _Unwind_Resume so that i686-pc-windows-gnu landing pads can
+// resume unwinding through a Rust-named symbol. `uw` is assumed to be a
+// file-level `use unwind as uw;` above this chunk — TODO confirm.
+#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
+#[lang = "eh_unwind_resume"]
+#[unwind]
+unsafe extern fn rust_eh_unwind_resume(panic_ctx: *mut u8) -> ! {
+    uw::_Unwind_Resume(panic_ctx as *mut uw::_Unwind_Exception);
+}
+
+// Frame unwind info registration
+//
+// Each module's image contains a frame unwind info section (usually
+// ".eh_frame"). When a module is loaded/unloaded into the process, the
+// unwinder must be informed about the location of this section in memory. The
+// methods of achieving that vary by the platform. On some (e.g. Linux), the
+// unwinder can discover unwind info sections on its own (by dynamically
+// enumerating currently loaded modules via the dl_iterate_phdr() API and
+// finding their ".eh_frame" sections); Others, like Windows, require modules
+// to actively register their unwind info sections via unwinder API.
+//
+// This module defines two symbols which are referenced and called from
+// rsbegin.rs to register our information with the GCC runtime. The
+// implementation of stack unwinding is (for now) deferred to libgcc_eh, however
+// Rust crates use these Rust-specific entry points to avoid potential clashes
+// with any GCC runtime.
+#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
+pub mod eh_frame_registry {
+    // Pull in gcc_eh for the non-cargo (makefile) build; the cargo build
+    // links it elsewhere.
+    #[link(name = "gcc_eh")]
+    #[cfg(not(cargobuild))]
+    extern {}
+
+    extern {
+        fn __register_frame_info(eh_frame_begin: *const u8, object: *mut u8);
+        fn __deregister_frame_info(eh_frame_begin: *const u8, object: *mut u8);
+    }
+
+    // Registration entry point referenced from rsbegin.rs.
+    #[no_mangle]
+    pub unsafe extern fn rust_eh_register_frames(eh_frame_begin: *const u8,
+                                                 object: *mut u8) {
+        __register_frame_info(eh_frame_begin, object);
+    }
+
+    // Deregistration entry point referenced from rsbegin.rs.
+    #[no_mangle]
+    pub unsafe extern fn rust_eh_unregister_frames(eh_frame_begin: *const u8,
+                                                   object: *mut u8) {
+        __deregister_frame_info(eh_frame_begin, object);
+    }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of panics via stack unwinding
+//!
+//! This crate is an implementation of panics in Rust using "most native" stack
+//! unwinding mechanism of the platform this is being compiled for. This
+//! essentially gets categorized into three buckets currently:
+//!
+//! 1. MSVC targets use SEH in the `seh.rs` file.
+//! 2. The 64-bit MinGW target half-uses SEH and half-uses gcc-like information
+//!    in the `seh64_gnu.rs` module.
+//! 3. All other targets use libunwind/libgcc in the `gcc/mod.rs` module.
+//!
+//! More documentation about each implementation can be found in the respective
+//! module.
+
+#![no_std]
+#![crate_name = "panic_unwind"]
+#![crate_type = "rlib"]
+#![unstable(feature = "panic_unwind", issue = "32837")]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+ html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+ html_root_url = "https://doc.rust-lang.org/nightly/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(alloc)]
+#![feature(core_intrinsics)]
+#![feature(lang_items)]
+#![feature(libc)]
+#![feature(panic_unwind)]
+#![feature(raw)]
+#![feature(staged_api)]
+#![feature(unwind_attributes)]
+#![cfg_attr(target_env = "msvc", feature(raw))]
+
+#![cfg_attr(not(stage0), panic_runtime)]
+#![cfg_attr(not(stage0), feature(panic_runtime))]
+
+extern crate alloc;
+extern crate libc;
+extern crate unwind;
+
+use core::intrinsics;
+use core::mem;
+use core::raw;
+
+// Rust runtime's startup objects depend on these symbols, so make them public.
+#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
+pub use imp::eh_frame_registry::*;
+
+// *-pc-windows-msvc
+#[cfg(target_env = "msvc")]
+#[path = "seh.rs"]
+mod imp;
+
+// x86_64-pc-windows-gnu
+#[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))]
+#[path = "seh64_gnu.rs"]
+mod imp;
+
+// i686-pc-windows-gnu and all others
+#[cfg(any(unix, all(windows, target_arch = "x86", target_env = "gnu")))]
+#[path = "gcc.rs"]
+mod imp;
+
+mod dwarf;
+mod windows;
+
+// Entry point for catching an exception, implemented using the `try` intrinsic
+// in the compiler.
+//
+// The interaction between the `payload` function and the compiler is pretty
+// hairy and tightly coupled, for more information see the compiler's
+// implementation of this.
+//
+// Returns 0 if `f(data)` completed normally. Returns 1 if a panic was caught,
+// in which case the caught Box<Any + Send> is decomposed into its trait-object
+// (data, vtable) pair and written through `data_ptr`/`vtable_ptr`.
+#[no_mangle]
+pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8),
+                                              data: *mut u8,
+                                              data_ptr: *mut usize,
+                                              vtable_ptr: *mut usize)
+                                              -> u32 {
+    // Platform-specific scratch space that the unwinder fills with the
+    // exception payload if `f` panics.
+    let mut payload = imp::payload();
+    if intrinsics::try(f, data, &mut payload as *mut _ as *mut _) == 0 {
+        0
+    } else {
+        let obj = mem::transmute::<_, raw::TraitObject>(imp::cleanup(payload));
+        *data_ptr = obj.data as usize;
+        *vtable_ptr = obj.vtable as usize;
+        1
+    }
+}
+
+// Entry point for raising an exception, just delegates to the platform-specific
+// implementation. `data`/`vtable` are the halves of a Box<Any + Send> trait
+// object; a nonzero return indicates the raise unexpectedly came back.
+#[no_mangle]
+pub unsafe extern fn __rust_start_panic(data: usize, vtable: usize) -> u32 {
+    imp::panic(mem::transmute(raw::TraitObject {
+        data: data as *mut (),
+        vtable: vtable as *mut (),
+    }))
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Windows SEH
+//!
+//! On Windows (currently only on MSVC), the default exception handling
+//! mechanism is Structured Exception Handling (SEH). This is quite different
+//! than Dwarf-based exception handling (e.g. what other unix platforms use) in
+//! terms of compiler internals, so LLVM is required to have a good deal of
+//! extra support for SEH.
+//!
+//! In a nutshell, what happens here is:
+//!
+//! 1. The `panic` function calls the standard Windows function
+//! `_CxxThrowException` to throw a C++-like exception, triggering the
+//! unwinding process.
+//! 2. All landing pads generated by the compiler use the personality function
+//! `__CxxFrameHandler3`, a function in the CRT, and the unwinding code in
+//! Windows will use this personality function to execute all cleanup code on
+//! the stack.
+//! 3. All compiler-generated calls to `invoke` have a landing pad set as a
+//! `cleanuppad` LLVM instruction, which indicates the start of the cleanup
+//! routine. The personality (in step 2, defined in the CRT) is responsible
+//! for running the cleanup routines.
+//! 4. Eventually the "catch" code in the `try` intrinsic (generated by the
+//! compiler) is executed and indicates that control should come back to
+//! Rust. This is done via a `catchswitch` plus a `catchpad` instruction in
+//! LLVM IR terms, finally returning normal control to the program with a
+//! `catchret` instruction.
+//!
+//! Some specific differences from the gcc-based exception handling are:
+//!
+//! * Rust has no custom personality function, it is instead *always*
+//! `__CxxFrameHandler3`. Additionally, no extra filtering is performed, so we
+//! end up catching any C++ exceptions that happen to look like the kind we're
+//! throwing. Note that throwing an exception into Rust is undefined behavior
+//! anyway, so this should be fine.
+//! * We've got some data to transmit across the unwinding boundary,
+//! specifically a `Box<Any + Send>`. Like with Dwarf exceptions
+//! these two pointers are stored as a payload in the exception itself. On
+//! MSVC, however, there's no need for an extra heap allocation because the
+//! call stack is preserved while filter functions are being executed. This
+//! means that the pointers are passed directly to `_CxxThrowException` which
+//! are then recovered in the filter function to be written to the stack frame
+//! of the `try` intrinsic.
+//!
+//! [win64]: http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
+//! [llvm]: http://llvm.org/docs/ExceptionHandling.html#background-on-windows-exceptions
+
+#![allow(bad_style)]
+#![allow(private_no_mangle_fns)]
+
+use alloc::boxed::Box;
+use core::any::Any;
+use core::mem;
+use core::raw;
+
+use windows as c;
+use libc::{c_int, c_uint};
+
+// First up, a whole bunch of type definitions. There's a few platform-specific
+// oddities here, and a lot that's just blatantly copied from LLVM. The purpose
+// of all this is to implement the `panic` function below through a call to
+// `_CxxThrowException`.
+//
+// This function takes two arguments. The first is a pointer to the data we're
+// passing in, which in this case is our trait object. Pretty easy to find! The
+// next, however, is more complicated. This is a pointer to a `_ThrowInfo`
+// structure, and it is generally just intended to describe the exception
+// being thrown.
+//
+// Currently the definition of this type [1] is a little hairy, and the main
+// oddity (and difference from the online article) is that on 32-bit the
+// pointers are pointers but on 64-bit the pointers are expressed as 32-bit
+// offsets from the `__ImageBase` symbol. The `ptr_t` and `ptr!` macro in the
+// modules below are used to express this.
+//
+// The maze of type definitions also closely follows what LLVM emits for this
+// sort of operation. For example, if you compile this C++ code on MSVC and emit
+// the LLVM IR:
+//
+//     #include <stdint.h>
+//
+// void foo() {
+// uint64_t a[2] = {0, 1};
+// throw a;
+// }
+//
+// That's essentially what we're trying to emulate. Most of the constant values
+// below were just copied from LLVM, I'm at least not 100% sure what's going on
+// everywhere. For example the `.PA_K\0` and `.PEA_K\0` strings below (stuck in
+// the names of a few of these) I'm not actually sure what they do, but it seems
+// to mirror what LLVM does!
+//
+// In any case, these structures are all constructed in a similar manner, and
+// it's just somewhat verbose for us.
+//
+// [1]: http://www.geoffchappell.com/studies/msvc/language/predefined/
+
+// 32-bit: the CRT structures hold real pointers; `ptr!` is a plain cast.
+#[cfg(target_arch = "x86")]
+#[macro_use]
+mod imp {
+    pub type ptr_t = *mut u8;
+    pub const OFFSET: i32 = 4;
+
+    // Mangled type-descriptor name bytes; copied from what LLVM emits
+    // (see the comment above) — exact meaning unverified.
+    pub const NAME1: [u8; 7] = [b'.', b'P', b'A', b'_', b'K', 0, 0];
+    pub const NAME2: [u8; 7] = [b'.', b'P', b'A', b'X', 0, 0, 0];
+
+    macro_rules! ptr {
+        (0) => (0 as *mut u8);
+        ($e:expr) => ($e as *mut u8);
+    }
+}
+
+// 64-bit: the CRT structures hold 32-bit offsets from __ImageBase instead of
+// pointers; `ptr!` performs the subtraction at runtime.
+#[cfg(target_arch = "x86_64")]
+#[macro_use]
+mod imp {
+    pub type ptr_t = u32;
+    pub const OFFSET: i32 = 8;
+
+    pub const NAME1: [u8; 7] = [b'.', b'P', b'E', b'A', b'_', b'K', 0];
+    pub const NAME2: [u8; 7] = [b'.', b'P', b'E', b'A', b'X', 0, 0];
+
+    extern {
+        // Base address of the current image, provided by the linker.
+        pub static __ImageBase: u8;
+    }
+
+    macro_rules! ptr {
+        (0) => (0);
+        ($e:expr) => {
+            (($e as usize) - (&imp::__ImageBase as *const _ as usize)) as u32
+        }
+    }
+}
+
+// Mirrors of the MSVC CRT's C++ exception-handling structures; layouts must
+// match the CRT exactly (see the reference link above).
+#[repr(C)]
+pub struct _ThrowInfo {
+    // NOTE(review): "attribues" is a typo for "attributes", but the name is
+    // referenced by the static initializers below, so it is kept as-is here.
+    pub attribues: c_uint,
+    pub pnfnUnwind: imp::ptr_t,
+    pub pForwardCompat: imp::ptr_t,
+    pub pCatchableTypeArray: imp::ptr_t,
+}
+
+#[repr(C)]
+pub struct _CatchableTypeArray {
+    pub nCatchableTypes: c_int,
+    pub arrayOfCatchableTypes: [imp::ptr_t; 2],
+}
+
+#[repr(C)]
+pub struct _CatchableType {
+    pub properties: c_uint,
+    pub pType: imp::ptr_t,
+    pub thisDisplacement: _PMD,
+    pub sizeOrOffset: c_int,
+    pub copy_function: imp::ptr_t,
+}
+
+// Pointer-to-member displacement descriptor.
+#[repr(C)]
+pub struct _PMD {
+    pub mdisp: c_int,
+    pub pdisp: c_int,
+    pub vdisp: c_int,
+}
+
+// Matches the layout of MSVC's std::type_info.
+#[repr(C)]
+pub struct _TypeDescriptor {
+    pub pVFTable: *const u8,
+    pub spare: *mut u8,
+    pub name: [u8; 7],
+}
+
+// The inter-structure pointer fields below start as null and are filled in at
+// runtime by `panic` (they cannot be computed in statics on 64-bit, where they
+// are offsets from __ImageBase — see the comment in `panic`).
+static mut THROW_INFO: _ThrowInfo = _ThrowInfo {
+    attribues: 0,
+    pnfnUnwind: ptr!(0),
+    pForwardCompat: ptr!(0),
+    pCatchableTypeArray: ptr!(0),
+};
+
+static mut CATCHABLE_TYPE_ARRAY: _CatchableTypeArray = _CatchableTypeArray {
+    nCatchableTypes: 2,
+    arrayOfCatchableTypes: [
+        ptr!(0),
+        ptr!(0),
+    ],
+};
+
+static mut CATCHABLE_TYPE1: _CatchableType = _CatchableType {
+    properties: 1,
+    pType: ptr!(0),
+    thisDisplacement: _PMD {
+        mdisp: 0,
+        pdisp: -1,
+        vdisp: 0,
+    },
+    sizeOrOffset: imp::OFFSET,
+    copy_function: ptr!(0),
+};
+
+static mut CATCHABLE_TYPE2: _CatchableType = _CatchableType {
+    properties: 1,
+    pType: ptr!(0),
+    thisDisplacement: _PMD {
+        mdisp: 0,
+        pdisp: -1,
+        vdisp: 0,
+    },
+    sizeOrOffset: imp::OFFSET,
+    copy_function: ptr!(0),
+};
+
+extern {
+    // The leading `\x01` byte here is actually a magical signal to LLVM to
+    // *not* apply any other mangling like prefixing with a `_` character.
+    //
+    // This symbol is the vtable used by C++'s `std::type_info`. Objects of type
+    // `std::type_info`, type descriptors, have a pointer to this table. Type
+    // descriptors are referenced by the C++ EH structures defined above and
+    // that we construct below.
+    #[link_name = "\x01??_7type_info@@6B@"]
+    static TYPE_INFO_VTABLE: *const u8;
+}
+
+// We use #[lang = "msvc_try_filter"] here as this is the type descriptor which
+// we'll use in LLVM's `catchpad` instruction which ends up also being passed as
+// an argument to the C++ personality function.
+//
+// Again, I'm not entirely sure what this is describing, it just seems to work.
+#[cfg_attr(all(not(test), not(stage0)),
+           lang = "msvc_try_filter")]
+static mut TYPE_DESCRIPTOR1: _TypeDescriptor = _TypeDescriptor {
+    pVFTable: &TYPE_INFO_VTABLE as *const _ as *const _,
+    spare: 0 as *mut _,
+    name: imp::NAME1,
+};
+
+static mut TYPE_DESCRIPTOR2: _TypeDescriptor = _TypeDescriptor {
+    pVFTable: &TYPE_INFO_VTABLE as *const _ as *const _,
+    spare: 0 as *mut _,
+    name: imp::NAME2,
+};
+
+// Raise a Rust panic as a C++-style exception via _CxxThrowException.
+// Returns only if the throw itself fails (u32::max_value() as an error code).
+pub unsafe fn panic(data: Box<Any + Send>) -> u32 {
+    use core::intrinsics::atomic_store;
+
+    // _CxxThrowException executes entirely on this stack frame, so there's no
+    // need to otherwise transfer `data` to the heap. We just pass a stack
+    // pointer to this function.
+    //
+    // The first argument is the payload being thrown (our two pointers), and
+    // the second argument is the type information object describing the
+    // exception (constructed above).
+    let ptrs = mem::transmute::<_, raw::TraitObject>(data);
+    let mut ptrs = [ptrs.data as u64, ptrs.vtable as u64];
+    let mut ptrs_ptr = ptrs.as_mut_ptr();
+
+    // This... may seem surprising, and justifiably so. On 32-bit MSVC the
+    // pointers between these structure are just that, pointers. On 64-bit MSVC,
+    // however, the pointers between structures are rather expressed as 32-bit
+    // offsets from `__ImageBase`.
+    //
+    // Consequently, on 32-bit MSVC we can declare all these pointers in the
+    // `static`s above. On 64-bit MSVC, we would have to express subtraction of
+    // pointers in statics, which Rust does not currently allow, so we can't
+    // actually do that.
+    //
+    // The next best thing, then, is to fill in these structures at runtime
+    // (panicking is already the "slow path" anyway). So here we reinterpret all
+    // of these pointer fields as 32-bit integers and then store the
+    // relevant value into it (atomically, as concurrent panics may be
+    // happening). Technically the runtime will probably do a nonatomic read of
+    // these fields, but in theory they never read the *wrong* value so it
+    // shouldn't be too bad...
+    //
+    // In any case, we basically need to do something like this until we can
+    // express more operations in statics (and we may never be able to).
+    atomic_store(&mut THROW_INFO.pCatchableTypeArray as *mut _ as *mut u32,
+                 ptr!(&CATCHABLE_TYPE_ARRAY as *const _) as u32);
+    atomic_store(&mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[0] as *mut _ as *mut u32,
+                 ptr!(&CATCHABLE_TYPE1 as *const _) as u32);
+    atomic_store(&mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[1] as *mut _ as *mut u32,
+                 ptr!(&CATCHABLE_TYPE2 as *const _) as u32);
+    atomic_store(&mut CATCHABLE_TYPE1.pType as *mut _ as *mut u32,
+                 ptr!(&TYPE_DESCRIPTOR1 as *const _) as u32);
+    atomic_store(&mut CATCHABLE_TYPE2.pType as *mut _ as *mut u32,
+                 ptr!(&TYPE_DESCRIPTOR2 as *const _) as u32);
+
+    c::_CxxThrowException(&mut ptrs_ptr as *mut _ as *mut _,
+                          &mut THROW_INFO as *mut _ as *mut _);
+    u32::max_value()
+}
+
+// Scratch space for the `try` intrinsic: two words that the catch machinery
+// fills in with the thrown (data, vtable) pointer pair (see `panic` above).
+pub fn payload() -> [u64; 2] {
+    [0; 2]
+}
+
+// Reassemble the Box<Any + Send> trait object from the two recovered words.
+pub unsafe fn cleanup(payload: [u64; 2]) -> Box<Any + Send> {
+    mem::transmute(raw::TraitObject {
+        data: payload[0] as *mut _,
+        vtable: payload[1] as *mut _,
+    })
+}
+
+// The stage0 bootstrap compiler expects the msvc_try_filter lang item to be a
+// function (newer compilers use the TYPE_DESCRIPTOR1 static above instead —
+// see the cfg_attr there). It performs no filtering itself.
+#[lang = "msvc_try_filter"]
+#[cfg(stage0)]
+unsafe extern fn __rust_try_filter(_eh_ptrs: *mut u8,
+                                   _payload: *mut u8) -> i32 {
+    return 0
+}
+
+// This is required by the compiler to exist (e.g. it's a lang item), but
+// it's never actually called by the compiler because __C_specific_handler
+// or _except_handler3 is the personality function that is always used.
+// Hence this is just an aborting stub.
+#[lang = "eh_personality"]
+#[cfg(not(test))]
+fn rust_eh_personality() {
+    unsafe { ::core::intrinsics::abort() }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unwinding implementation on top of native Win64 SEH,
+//! however the unwind handler data (aka LSDA) uses GCC-compatible encoding.
+
+#![allow(bad_style)]
+#![allow(private_no_mangle_fns)]
+
+use alloc::boxed::Box;
+
+use core::any::Any;
+use core::intrinsics;
+use dwarf::eh;
+use windows as c;
+
+// Define our exception codes:
+// according to http://msdn.microsoft.com/en-us/library/het71c37(v=VS.80).aspx,
+// [31:30] = 3 (error), 2 (warning), 1 (info), 0 (success)
+// [29] = 1 (user-defined)
+// [28] = 0 (reserved)
+// we define bits:
+// [24:27] = type
+// [0:23] = magic
+const ETYPE: c::DWORD = 0b1110_u32 << 28;
+const MAGIC: c::DWORD = 0x525354; // "RST"
+
+// Exception code used for all Rust panics: error + user-defined, type 1,
+// "RST" magic.
+const RUST_PANIC: c::DWORD = ETYPE | (1 << 24) | MAGIC;
+
+// Heap-allocated carrier for the panic payload; a pointer to this box travels
+// through the SEH exception's first parameter (see `panic`/`cleanup` below).
+#[repr(C)]
+struct PanicData {
+    data: Box<Any + Send>
+}
+
+// Raise a Rust panic as a noncontinuable SEH exception. The boxed PanicData
+// pointer is smuggled through the exception's single parameter. Returns only
+// if RaiseException comes back (u32::max_value() as an error code).
+pub unsafe fn panic(data: Box<Any + Send>) -> u32 {
+    let panic_ctx = Box::new(PanicData { data: data });
+    let params = [Box::into_raw(panic_ctx) as c::ULONG_PTR];
+    c::RaiseException(RUST_PANIC,
+                      c::EXCEPTION_NONCONTINUABLE,
+                      params.len() as c::DWORD,
+                      &params as *const c::ULONG_PTR);
+    u32::max_value()
+}
+
+// Scratch pointer that the personality routine fills with the PanicData
+// pointer on catch.
+pub fn payload() -> *mut u8 {
+    0 as *mut u8
+}
+
+// Reclaim the PanicData box raised in `panic` and extract the payload.
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send> {
+    let panic_ctx = Box::from_raw(ptr as *mut PanicData);
+    return panic_ctx.data;
+}
+
+// SEH doesn't support resuming unwinds after calling a landing pad like
+// libunwind does. For this reason, MSVC compiler outlines landing pads into
+// separate functions that can be called directly from the personality function
+// but are nevertheless able to find and modify stack frame of the "parent"
+// function.
+//
+// Since this cannot be done with libdwarf-style landing pads,
+// rust_eh_personality instead catches RUST_PANICs, runs the landing pad, then
+// reraises the exception.
+//
+// Note that it makes certain assumptions about the exception:
+//
+// 1. That RUST_PANIC is non-continuable, so no lower stack frame may choose to
+//    resume execution.
+// 2. That the first parameter of the exception is a pointer to an extra data
+//    area (PanicData).
+// Since these assumptions do not generally hold true for foreign exceptions
+// (system faults, C++ exceptions, etc), we make no attempt to invoke our
+// landing pads (and, thus, destructors!) for anything other than RUST_PANICs.
+// This is considered acceptable, because the behavior of throwing exceptions
+// through a C ABI boundary is undefined.
+
+// The catch variant behaves identically to rust_eh_personality below; it is
+// simply a distinct lang item used by the `try` intrinsic's frames.
+#[lang = "eh_personality_catch"]
+#[cfg(not(test))]
+unsafe extern fn rust_eh_personality_catch(
+    exceptionRecord: *mut c::EXCEPTION_RECORD,
+    establisherFrame: c::LPVOID,
+    contextRecord: *mut c::CONTEXT,
+    dispatcherContext: *mut c::DISPATCHER_CONTEXT
+) -> c::EXCEPTION_DISPOSITION
+{
+    rust_eh_personality(exceptionRecord, establisherFrame,
+                        contextRecord, dispatcherContext)
+}
+
+// Win64 personality routine (see the large comment above): during the
+// dispatch phase, if the exception is one of our RUST_PANICs and this frame
+// has a landing pad, initiate an unwind straight to it; in every other case
+// keep searching.
+#[lang = "eh_personality"]
+#[cfg(not(test))]
+unsafe extern fn rust_eh_personality(
+    exceptionRecord: *mut c::EXCEPTION_RECORD,
+    establisherFrame: c::LPVOID,
+    contextRecord: *mut c::CONTEXT,
+    dispatcherContext: *mut c::DISPATCHER_CONTEXT
+) -> c::EXCEPTION_DISPOSITION
+{
+    let er = &*exceptionRecord;
+    let dc = &*dispatcherContext;
+
+    if er.ExceptionFlags & c::EXCEPTION_UNWIND == 0 { // we are in the dispatch phase
+        if er.ExceptionCode == RUST_PANIC {
+            if let Some(lpad) = find_landing_pad(dc) {
+                c::RtlUnwindEx(establisherFrame,
+                               lpad as c::LPVOID,
+                               exceptionRecord,
+                               er.ExceptionInformation[0] as c::LPVOID, // pointer to PanicData
+                               contextRecord,
+                               dc.HistoryTable);
+            }
+        }
+    }
+    c::ExceptionContinueSearch
+}
+
+// Re-raise the panic after a landing pad has run its cleanups, passing the
+// same PanicData pointer along. abort() is unreachable in the normal case,
+// since RaiseException does not return once a handler is found.
+#[lang = "eh_unwind_resume"]
+#[unwind]
+unsafe extern fn rust_eh_unwind_resume(panic_ctx: c::LPVOID) -> ! {
+    let params = [panic_ctx as c::ULONG_PTR];
+    c::RaiseException(RUST_PANIC,
+                      c::EXCEPTION_NONCONTINUABLE,
+                      params.len() as c::DWORD,
+                      &params as *const c::ULONG_PTR);
+    intrinsics::abort();
+}
+
+// Decode the GCC-style LSDA attached to the current frame and look up the
+// landing pad for the faulting instruction pointer, if any.
+unsafe fn find_landing_pad(dc: &c::DISPATCHER_CONTEXT) -> Option<usize> {
+    let eh_ctx = eh::EHContext {
+        ip: dc.ControlPc as usize,
+        func_start: dc.ImageBase as usize + (*dc.FunctionEntry).BeginAddress as usize,
+        text_start: dc.ImageBase as usize,
+        data_start: 0
+    };
+    eh::find_landing_pad(dc.HandlerData, &eh_ctx)
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(bad_style)]
+#![allow(dead_code)]
+#![cfg(windows)]
+
+use libc::{c_void, c_ulong, c_long, c_ulonglong};
+
+// Minimal Windows API type aliases used by the SEH implementations.
+pub type DWORD = c_ulong;
+pub type LONG = c_long;
+// NOTE(review): ULONG_PTR is defined as a 64-bit integer; this is only correct
+// on 64-bit targets — confirm whether the 32-bit (MSVC x86) build uses it.
+pub type ULONG_PTR = c_ulonglong;
+pub type LPVOID = *mut c_void;
+
+pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
+pub const EXCEPTION_NONCONTINUABLE: DWORD = 0x1;   // Noncontinuable exception
+pub const EXCEPTION_UNWINDING: DWORD = 0x2;        // Unwind is in progress
+pub const EXCEPTION_EXIT_UNWIND: DWORD = 0x4;      // Exit unwind is in progress
+pub const EXCEPTION_TARGET_UNWIND: DWORD = 0x20;   // Target unwind in progress
+pub const EXCEPTION_COLLIDED_UNWIND: DWORD = 0x40; // Collided exception handler call
+// Any-unwind-in-progress mask, tested by the personality routines.
+pub const EXCEPTION_UNWIND: DWORD = EXCEPTION_UNWINDING |
+                                    EXCEPTION_EXIT_UNWIND |
+                                    EXCEPTION_TARGET_UNWIND |
+                                    EXCEPTION_COLLIDED_UNWIND;
+
+// Mirrors of the Windows SEH structures consumed by the personality routines.
+#[repr(C)]
+pub struct EXCEPTION_RECORD {
+    pub ExceptionCode: DWORD,
+    pub ExceptionFlags: DWORD,
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ExceptionAddress: LPVOID,
+    pub NumberParameters: DWORD,
+    pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS]
+}
+
+#[repr(C)]
+pub struct EXCEPTION_POINTERS {
+    pub ExceptionRecord: *mut EXCEPTION_RECORD,
+    pub ContextRecord: *mut CONTEXT,
+}
+
+// Opaque: only ever handled through pointers.
+pub enum UNWIND_HISTORY_TABLE {}
+
+#[repr(C)]
+pub struct RUNTIME_FUNCTION {
+    pub BeginAddress: DWORD,
+    pub EndAddress: DWORD,
+    pub UnwindData: DWORD,
+}
+
+// Opaque: only ever handled through pointers.
+pub enum CONTEXT {}
+
+#[repr(C)]
+pub struct DISPATCHER_CONTEXT {
+    pub ControlPc: LPVOID,
+    pub ImageBase: LPVOID,
+    pub FunctionEntry: *const RUNTIME_FUNCTION,
+    pub EstablisherFrame: LPVOID,
+    pub TargetIp: LPVOID,
+    pub ContextRecord: *const CONTEXT,
+    pub LanguageHandler: LPVOID,
+    pub HandlerData: *const u8,
+    pub HistoryTable: *const UNWIND_HISTORY_TABLE,
+}
+
+#[repr(C)]
+pub enum EXCEPTION_DISPOSITION {
+    ExceptionContinueExecution,
+    ExceptionContinueSearch,
+    ExceptionNestedException,
+    ExceptionCollidedUnwind
+}
+pub use self::EXCEPTION_DISPOSITION::*;
+
+extern "system" {
+    // #[unwind] marks these as able to unwind through the Rust frames that
+    // call them (raising/unwinding rather than returning normally).
+    #[unwind]
+    pub fn RaiseException(dwExceptionCode: DWORD,
+                          dwExceptionFlags: DWORD,
+                          nNumberOfArguments: DWORD,
+                          lpArguments: *const ULONG_PTR);
+    #[unwind]
+    pub fn RtlUnwindEx(TargetFrame: LPVOID,
+                       TargetIp: LPVOID,
+                       ExceptionRecord: *const EXCEPTION_RECORD,
+                       ReturnValue: LPVOID,
+                       OriginalContext: *const CONTEXT,
+                       HistoryTable: *const UNWIND_HISTORY_TABLE);
+    #[unwind]
+    pub fn _CxxThrowException(pExceptionObject: *mut c_void,
+                              pThrowInfo: *mut u8);
+}
use hir::{self, PatKind};
struct CFGBuilder<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
graph: CFGGraph,
fn_exit: CFGIndex,
loop_scopes: Vec<LoopScope>,
break_index: CFGIndex, // where to go on a `break
}
-pub fn construct(tcx: &TyCtxt,
- blk: &hir::Block) -> CFG {
+pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ blk: &hir::Block) -> CFG {
let mut graph = graph::Graph::new();
let entry = graph.add_node(CFGNodeData::Entry);
pub type CFGEdge = graph::Edge<CFGEdgeData>;
impl CFG {
- pub fn new(tcx: &TyCtxt,
- blk: &hir::Block) -> CFG {
+ pub fn new<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ blk: &hir::Block) -> CFG {
construct::construct(tcx, blk)
}
/// read edge from the corresponding AST node. This is used in
/// compiler passes to automatically record the item that they are
/// working on.
-pub fn visit_all_items_in_krate<'tcx,V,F>(tcx: &TyCtxt<'tcx>,
- mut dep_node_fn: F,
- visitor: &mut V)
+pub fn visit_all_items_in_krate<'a, 'tcx, V, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ mut dep_node_fn: F,
+ visitor: &mut V)
where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'tcx>
{
struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> {
- tcx: &'visit TyCtxt<'tcx>,
+ tcx: TyCtxt<'visit, 'tcx, 'tcx>,
dep_node_fn: &'visit mut F,
visitor: &'visit mut V
}
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum Def {
Fn(DefId),
- SelfTy(Option<DefId>, // trait id
- Option<(ast::NodeId, ast::NodeId)>), // (impl id, self type id)
+ SelfTy(Option<DefId> /* trait */, Option<ast::NodeId> /* impl */),
Mod(DefId),
ForeignMod(DefId),
Static(DefId, bool /* is_mutbl */),
use hir;
use hir::map::Definitions;
use hir::map::definitions::DefPathData;
-use hir::def_id::DefIndex;
+use hir::def_id::{DefIndex, DefId};
+use hir::def::Def;
use std::collections::BTreeMap;
use std::iter;
crate_root: Option<&'static str>,
// Use to assign ids to hir nodes that do not directly correspond to an ast node
id_assigner: &'a NodeIdAssigner,
- // We must keep the set of definitions up to date as we add nodes that
- // weren't in the AST.
- definitions: Option<&'a RefCell<Definitions>>,
// As we walk the AST we must keep track of the current 'parent' def id (in
// the form of a DefIndex) so that if we create a new node which introduces
// a definition, then we can properly create the def id.
parent_def: Cell<Option<DefIndex>>,
+ resolver: RefCell<&'a mut Resolver>,
+}
+
+pub trait Resolver {
+ // Resolve a global hir path generated by the lowerer when expanding `for`, `if let`, etc.
+ fn resolve_generated_global_path(&mut self, path: &hir::Path, is_value: bool) -> Def;
+
+ // Record the resolution of a path or binding generated by the lowerer when expanding.
+ fn record_resolution(&mut self, id: NodeId, def: Def);
+
+ // We must keep the set of definitions up to date as we add nodes that weren't in the AST.
+ // This should only return `None` during testing.
+ fn definitions(&mut self) -> Option<&mut Definitions>;
+}
+
+pub struct DummyResolver;
+impl Resolver for DummyResolver {
+ fn resolve_generated_global_path(&mut self, _path: &hir::Path, _is_value: bool) -> Def {
+ Def::Err
+ }
+ fn record_resolution(&mut self, _id: NodeId, _def: Def) {}
+ fn definitions(&mut self) -> Option<&mut Definitions> {
+ None
+ }
}
impl<'a, 'hir> LoweringContext<'a> {
pub fn new(id_assigner: &'a NodeIdAssigner,
c: Option<&Crate>,
- defs: &'a RefCell<Definitions>)
+ resolver: &'a mut Resolver)
-> LoweringContext<'a> {
let crate_root = c.and_then(|c| {
if std_inject::no_core(c) {
LoweringContext {
crate_root: crate_root,
id_assigner: id_assigner,
- definitions: Some(defs),
- parent_def: Cell::new(None),
- }
- }
-
- // Only use this when you want a LoweringContext for testing and won't look
- // up def ids for anything created during lowering.
- pub fn testing_context(id_assigner: &'a NodeIdAssigner) -> LoweringContext<'a> {
- LoweringContext {
- crate_root: None,
- id_assigner: id_assigner,
- definitions: None,
parent_def: Cell::new(None),
+ resolver: RefCell::new(resolver),
}
}
}
fn with_parent_def<T, F: FnOnce() -> T>(&self, parent_id: NodeId, f: F) -> T {
- if self.definitions.is_none() {
- // This should only be used for testing.
- return f();
- }
-
let old_def = self.parent_def.get();
- self.parent_def.set(Some(self.get_def(parent_id)));
+ self.parent_def.set(match self.resolver.borrow_mut().definitions() {
+ Some(defs) => Some(defs.opt_def_index(parent_id).unwrap()),
+ None => old_def,
+ });
+
let result = f();
- self.parent_def.set(old_def);
+ self.parent_def.set(old_def);
result
}
-
- fn get_def(&self, id: NodeId) -> DefIndex {
- let defs = self.definitions.unwrap().borrow();
- defs.opt_def_index(id).unwrap()
- }
}
pub fn lower_ident(_lctx: &LoweringContext, ident: Ident) -> hir::Ident {
};
// let placer = <placer_expr> ;
- let s1 = {
+ let (s1, placer_binding) = {
let placer_expr = signal_block_expr(lctx,
hir_vec![],
placer_expr,
};
// let mut place = Placer::make_place(placer);
- let s2 = {
- let placer = expr_ident(lctx, e.span, placer_ident, None);
+ let (s2, place_binding) = {
+ let placer = expr_ident(lctx, e.span, placer_ident, None, placer_binding);
let call = make_call(lctx, &make_place, hir_vec![placer]);
mk_stmt_let_mut(lctx, place_ident, call)
};
// let p_ptr = Place::pointer(&mut place);
- let s3 = {
- let agent = expr_ident(lctx, e.span, place_ident, None);
+ let (s3, p_ptr_binding) = {
+ let agent = expr_ident(lctx, e.span, place_ident, None, place_binding);
let args = hir_vec![expr_mut_addr_of(lctx, e.span, agent, None)];
let call = make_call(lctx, &place_pointer, args);
mk_stmt_let(lctx, p_ptr_ident, call)
// InPlace::finalize(place)
// })
let expr = {
- let ptr = expr_ident(lctx, e.span, p_ptr_ident, None);
+ let ptr = expr_ident(lctx, e.span, p_ptr_ident, None, p_ptr_binding);
let call_move_val_init =
hir::StmtSemi(
make_call(lctx, &move_val_init, hir_vec![ptr, pop_unsafe_expr]),
lctx.next_id());
let call_move_val_init = respan(e.span, call_move_val_init);
- let place = expr_ident(lctx, e.span, place_ident, None);
+ let place = expr_ident(lctx, e.span, place_ident, None, place_binding);
let call = make_call(lctx, &inplace_finalize, hir_vec![place]);
signal_block_expr(lctx,
hir_vec![call_move_val_init],
let loop_expr = hir::ExprLoop(loop_block,
opt_ident.map(|ident| lower_ident(lctx, ident)));
// add attributes to the outer returned expr node
- return expr(lctx, e.span, loop_expr, e.attrs.clone());
+ let attrs = e.attrs.clone();
+ return P(hir::Expr { id: e.id, node: loop_expr, span: e.span, attrs: attrs });
}
// Desugar ExprForLoop
arm(hir_vec![pat_none(lctx, e.span)], break_expr)
};
+ // `mut iter`
+ let iter_pat =
+ pat_ident_binding_mode(lctx, e.span, iter, hir::BindByValue(hir::MutMutable));
+
// `match ::std::iter::Iterator::next(&mut iter) { ... }`
let match_expr = {
let next_path = {
path_global(e.span, strs)
};
- let iter = expr_ident(lctx, e.span, iter, None);
+ let iter = expr_ident(lctx, e.span, iter, None, iter_pat.id);
let ref_mut_iter = expr_mut_addr_of(lctx, e.span, iter, None);
let next_path = expr_path(lctx, next_path, None);
let next_expr = expr_call(lctx,
let loop_block = block_expr(lctx, match_expr);
let loop_expr = hir::ExprLoop(loop_block,
opt_ident.map(|ident| lower_ident(lctx, ident)));
- let loop_expr = expr(lctx, e.span, loop_expr, None);
+ let loop_expr =
+ P(hir::Expr { id: e.id, node: loop_expr, span: e.span, attrs: None });
// `mut iter => { ... }`
- let iter_arm = {
- let iter_pat = pat_ident_binding_mode(lctx,
- e.span,
- iter,
- hir::BindByValue(hir::MutMutable));
- arm(hir_vec![iter_pat], loop_expr)
- };
+ let iter_arm = arm(hir_vec![iter_pat], loop_expr);
// `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
let into_iter_expr = {
// `{ let _result = ...; _result }`
// underscore prevents an unused_variables lint if the head diverges
let result_ident = lctx.str_to_ident("_result");
- let let_stmt = stmt_let(lctx,
- e.span,
- false,
- result_ident,
- match_expr,
- None);
- let result = expr_ident(lctx, e.span, result_ident, None);
+ let (let_stmt, let_stmt_binding) =
+ stmt_let(lctx, e.span, false, result_ident, match_expr, None);
+
+ let result = expr_ident(lctx, e.span, result_ident, None, let_stmt_binding);
let block = block_all(lctx, e.span, hir_vec![let_stmt], Some(result));
// add the attributes to the outer returned expr node
return expr_block(lctx, block, e.attrs.clone());
let ok_arm = {
let val_ident = lctx.str_to_ident("val");
let val_pat = pat_ident(lctx, e.span, val_ident);
- let val_expr = expr_ident(lctx, e.span, val_ident, None);
+ let val_expr = expr_ident(lctx, e.span, val_ident, None, val_pat.id);
let ok_pat = pat_ok(lctx, e.span, val_pat);
arm(hir_vec![ok_pat], val_expr)
// Err(err) => return Err(From::from(err))
let err_arm = {
let err_ident = lctx.str_to_ident("err");
+ let err_local = pat_ident(lctx, e.span, err_ident);
let from_expr = {
let path = std_path(lctx, &["convert", "From", "from"]);
let path = path_global(e.span, path);
let from = expr_path(lctx, path, None);
- let err_expr = expr_ident(lctx, e.span, err_ident, None);
+ let err_expr = expr_ident(lctx, e.span, err_ident, None, err_local.id);
expr_call(lctx, e.span, from, hir_vec![err_expr], None)
};
let err_ctor = expr_path(lctx, path, None);
expr_call(lctx, e.span, err_ctor, hir_vec![from_expr], None)
};
- let err_pat = pat_err(lctx, e.span,
- pat_ident(lctx, e.span, err_ident));
+ let err_pat = pat_err(lctx, e.span, err_local);
let ret_expr = expr(lctx, e.span,
hir::Expr_::ExprRet(Some(err_expr)), None);
}
fn expr_ident(lctx: &LoweringContext, span: Span, id: hir::Ident,
- attrs: ThinAttributes) -> P<hir::Expr> {
- expr_path(lctx, path_ident(span, id), attrs)
+ attrs: ThinAttributes, binding: NodeId) -> P<hir::Expr> {
+ let expr = expr(lctx, span, hir::ExprPath(None, path_ident(span, id)), attrs);
+
+ let mut resolver = lctx.resolver.borrow_mut();
+ let def = resolver.definitions().map(|defs| Def::Local(defs.local_def_id(binding), binding))
+ .unwrap_or(Def::Err);
+ resolver.record_resolution(expr.id, def);
+
+ expr
}
fn expr_mut_addr_of(lctx: &LoweringContext, span: Span, e: P<hir::Expr>,
fn expr_path(lctx: &LoweringContext, path: hir::Path,
attrs: ThinAttributes) -> P<hir::Expr> {
- expr(lctx, path.span, hir::ExprPath(None, path), attrs)
+ let def = lctx.resolver.borrow_mut().resolve_generated_global_path(&path, true);
+ let expr = expr(lctx, path.span, hir::ExprPath(None, path), attrs);
+ lctx.resolver.borrow_mut().record_resolution(expr.id, def);
+ expr
}
fn expr_match(lctx: &LoweringContext,
fields: hir::HirVec<hir::Field>,
e: Option<P<hir::Expr>>,
attrs: ThinAttributes) -> P<hir::Expr> {
- expr(lctx, sp, hir::ExprStruct(path, fields, e), attrs)
+ let def = lctx.resolver.borrow_mut().resolve_generated_global_path(&path, false);
+ let expr = expr(lctx, sp, hir::ExprStruct(path, fields, e), attrs);
+ lctx.resolver.borrow_mut().record_resolution(expr.id, def);
+ expr
+
}
fn expr(lctx: &LoweringContext, span: Span, node: hir::Expr_,
ident: hir::Ident,
ex: P<hir::Expr>,
attrs: ThinAttributes)
- -> hir::Stmt {
+ -> (hir::Stmt, NodeId) {
let pat = if mutbl {
pat_ident_binding_mode(lctx, sp, ident, hir::BindByValue(hir::MutMutable))
} else {
pat_ident(lctx, sp, ident)
};
+ let pat_id = pat.id;
let local = P(hir::Local {
pat: pat,
ty: None,
attrs: attrs,
});
let decl = respan(sp, hir::DeclLocal(local));
- respan(sp, hir::StmtDecl(P(decl), lctx.next_id()))
+ (respan(sp, hir::StmtDecl(P(decl), lctx.next_id())), pat_id)
}
fn block_expr(lctx: &LoweringContext, expr: P<hir::Expr>) -> P<hir::Block> {
path: hir::Path,
subpats: hir::HirVec<P<hir::Pat>>)
-> P<hir::Pat> {
+ let def = lctx.resolver.borrow_mut().resolve_generated_global_path(&path, true);
let pt = if subpats.is_empty() {
hir::PatKind::Path(path)
} else {
hir::PatKind::TupleStruct(path, Some(subpats))
};
- pat(lctx, span, pt)
+ let pat = pat(lctx, span, pt);
+ lctx.resolver.borrow_mut().record_resolution(pat.id, def);
+ pat
}
fn pat_ident(lctx: &LoweringContext, span: Span, ident: hir::Ident) -> P<hir::Pat> {
let pat = pat(lctx, span, pat_ident);
- if let Some(defs) = lctx.definitions {
- let mut defs = defs.borrow_mut();
- defs.create_def_with_parent(lctx.parent_def.get(),
- pat.id,
- DefPathData::Binding(ident.name));
- }
+ let mut resolver = lctx.resolver.borrow_mut();
+ let def = resolver.definitions().map(|defs| {
+ let def_path_data = DefPathData::Binding(ident.name);
+ let def_index = defs.create_def_with_parent(lctx.parent_def.get(), pat.id, def_path_data);
+ Def::Local(DefId::local(def_index), pat.id)
+ }).unwrap_or(Def::Err);
+ resolver.record_resolution(pat.id, def);
pat
}
self.opt_def_index(node).map(DefId::local)
}
+ pub fn local_def_id(&self, node: ast::NodeId) -> DefId {
+ self.opt_local_def_id(node).unwrap()
+ }
+
pub fn as_local_node_id(&self, def_id: DefId) -> Option<ast::NodeId> {
if def_id.krate == LOCAL_CRATE {
assert!(def_id.index.as_usize() < self.data.len());
pub type CaptureModeMap = NodeMap<CaptureClause>;
+#[derive(Clone)]
pub struct TraitCandidate {
pub def_id: DefId,
pub import_id: Option<NodeId>,
}
}
-pub fn def_to_path(tcx: &TyCtxt, id: DefId) -> hir::Path {
+pub fn def_to_path<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> hir::Path {
let name = tcx.item_name(id);
hir::Path::from_ident(DUMMY_SP, hir::Ident::from_name(name))
}
//! In particular, it might be enough to say (A,B) are bivariant for
//! all (A,B).
-use super::combine::{self, CombineFields};
+use super::combine::CombineFields;
use super::type_variable::{BiTo};
use ty::{self, Ty, TyCtxt};
use ty::TyVar;
use ty::relate::{Relate, RelateResult, TypeRelation};
-pub struct Bivariate<'a, 'tcx: 'a> {
- fields: CombineFields<'a, 'tcx>
+pub struct Bivariate<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fields: CombineFields<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Bivariate<'a, 'tcx> {
- pub fn new(fields: CombineFields<'a, 'tcx>) -> Bivariate<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Bivariate<'a, 'gcx, 'tcx> {
+ pub fn new(fields: CombineFields<'a, 'gcx, 'tcx>) -> Bivariate<'a, 'gcx, 'tcx> {
Bivariate { fields: fields }
}
}
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Bivariate<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Bivariate<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Bivariate" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.fields.tcx() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ variance: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
match variance {
// If we have Foo<A> and Foo is invariant w/r/t A,
}
_ => {
- combine::super_combine_tys(self.fields.infcx, self, a, b)
+ self.fields.infcx.super_combine_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a,'tcx>
+ where T: Relate<'tcx>
{
let a1 = self.tcx().erase_late_bound_regions(a);
let b1 = self.tcx().erase_late_bound_regions(b);
use syntax::codemap::Span;
#[derive(Clone)]
-pub struct CombineFields<'a, 'tcx: 'a> {
- pub infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct CombineFields<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
pub a_is_expected: bool,
pub trace: TypeTrace<'tcx>,
pub cause: Option<ty::relate::Cause>,
pub obligations: PredicateObligations<'tcx>,
}
-pub fn super_combine_tys<'a,'tcx:'a,R>(infcx: &InferCtxt<'a, 'tcx>,
- relation: &mut R,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> RelateResult<'tcx, Ty<'tcx>>
- where R: TypeRelation<'a,'tcx>
-{
- let a_is_expected = relation.a_is_expected();
-
- match (&a.sty, &b.sty) {
- // Relate integral variables to other types
- (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
- infcx.int_unification_table
- .borrow_mut()
- .unify_var_var(a_id, b_id)
- .map_err(|e| int_unification_error(a_is_expected, e))?;
- Ok(a)
- }
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
- unify_integral_variable(infcx, a_is_expected, v_id, IntType(v))
- }
- (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
- unify_integral_variable(infcx, !a_is_expected, v_id, IntType(v))
- }
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
- unify_integral_variable(infcx, a_is_expected, v_id, UintType(v))
- }
- (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
- unify_integral_variable(infcx, !a_is_expected, v_id, UintType(v))
- }
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ pub fn super_combine_tys<R>(&self,
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>
+ {
+ let a_is_expected = relation.a_is_expected();
+
+ match (&a.sty, &b.sty) {
+ // Relate integral variables to other types
+ (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
+ self.int_unification_table
+ .borrow_mut()
+ .unify_var_var(a_id, b_id)
+ .map_err(|e| int_unification_error(a_is_expected, e))?;
+ Ok(a)
+ }
+ (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
+ self.unify_integral_variable(a_is_expected, v_id, IntType(v))
+ }
+ (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
+ }
+ (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
+ self.unify_integral_variable(a_is_expected, v_id, UintType(v))
+ }
+ (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
+ }
- // Relate floating-point variables to other types
- (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
- infcx.float_unification_table
- .borrow_mut()
- .unify_var_var(a_id, b_id)
- .map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
- Ok(a)
- }
- (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
- unify_float_variable(infcx, a_is_expected, v_id, v)
- }
- (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
- unify_float_variable(infcx, !a_is_expected, v_id, v)
- }
+ // Relate floating-point variables to other types
+ (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
+ self.float_unification_table
+ .borrow_mut()
+ .unify_var_var(a_id, b_id)
+ .map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
+ Ok(a)
+ }
+ (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
+ self.unify_float_variable(a_is_expected, v_id, v)
+ }
+ (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
+ self.unify_float_variable(!a_is_expected, v_id, v)
+ }
- // All other cases of inference are errors
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) => {
- Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
- }
+ // All other cases of inference are errors
+ (&ty::TyInfer(_), _) |
+ (_, &ty::TyInfer(_)) => {
+ Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
+ }
- _ => {
- ty::relate::super_relate_tys(relation, a, b)
+ _ => {
+ ty::relate::super_relate_tys(relation, a, b)
+ }
}
}
-}
-fn unify_integral_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- vid_is_expected: bool,
- vid: ty::IntVid,
- val: ty::IntVarValue)
- -> RelateResult<'tcx, Ty<'tcx>>
-{
- infcx.int_unification_table
- .borrow_mut()
- .unify_var_value(vid, val)
- .map_err(|e| int_unification_error(vid_is_expected, e))?;
- match val {
- IntType(v) => Ok(infcx.tcx.mk_mach_int(v)),
- UintType(v) => Ok(infcx.tcx.mk_mach_uint(v)),
+ fn unify_integral_variable(&self,
+ vid_is_expected: bool,
+ vid: ty::IntVid,
+ val: ty::IntVarValue)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ {
+ self.int_unification_table
+ .borrow_mut()
+ .unify_var_value(vid, val)
+ .map_err(|e| int_unification_error(vid_is_expected, e))?;
+ match val {
+ IntType(v) => Ok(self.tcx.mk_mach_int(v)),
+ UintType(v) => Ok(self.tcx.mk_mach_uint(v)),
+ }
}
-}
-fn unify_float_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- vid_is_expected: bool,
- vid: ty::FloatVid,
- val: ast::FloatTy)
- -> RelateResult<'tcx, Ty<'tcx>>
-{
- infcx.float_unification_table
- .borrow_mut()
- .unify_var_value(vid, val)
- .map_err(|e| float_unification_error(vid_is_expected, e))?;
- Ok(infcx.tcx.mk_mach_float(val))
+ fn unify_float_variable(&self,
+ vid_is_expected: bool,
+ vid: ty::FloatVid,
+ val: ast::FloatTy)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ {
+ self.float_unification_table
+ .borrow_mut()
+ .unify_var_value(vid, val)
+ .map_err(|e| float_unification_error(vid_is_expected, e))?;
+ Ok(self.tcx.mk_mach_float(val))
+ }
}
-impl<'a, 'tcx> CombineFields<'a, 'tcx> {
- pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.infcx.tcx
}
- pub fn switch_expected(&self) -> CombineFields<'a, 'tcx> {
+ pub fn switch_expected(&self) -> CombineFields<'a, 'gcx, 'tcx> {
CombineFields {
a_is_expected: !self.a_is_expected,
..(*self).clone()
}
}
- pub fn equate(&self) -> Equate<'a, 'tcx> {
+ pub fn equate(&self) -> Equate<'a, 'gcx, 'tcx> {
Equate::new(self.clone())
}
- pub fn bivariate(&self) -> Bivariate<'a, 'tcx> {
+ pub fn bivariate(&self) -> Bivariate<'a, 'gcx, 'tcx> {
Bivariate::new(self.clone())
}
- pub fn sub(&self) -> Sub<'a, 'tcx> {
+ pub fn sub(&self) -> Sub<'a, 'gcx, 'tcx> {
Sub::new(self.clone())
}
- pub fn lub(&self) -> Lub<'a, 'tcx> {
+ pub fn lub(&self) -> Lub<'a, 'gcx, 'tcx> {
Lub::new(self.clone())
}
- pub fn glb(&self) -> Glb<'a, 'tcx> {
+ pub fn glb(&self) -> Glb<'a, 'gcx, 'tcx> {
Glb::new(self.clone())
}
}
}
-struct Generalizer<'cx, 'tcx:'cx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
+ infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
span: Span,
for_vid: ty::TyVid,
make_region_vars: bool,
cycle_detected: bool,
}
-impl<'cx, 'tcx> ty::fold::TypeFolder<'tcx> for Generalizer<'cx, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'cx, 'gcx, 'tcx> ty::fold::TypeFolder<'gcx, 'tcx> for Generalizer<'cx, 'gcx, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.infcx.tcx
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::combine::{self, CombineFields};
-use super::higher_ranked::HigherRankedRelations;
+use super::combine::CombineFields;
use super::{Subtype};
use super::type_variable::{EqTo};
use traits::PredicateObligations;
/// Ensures `a` is made equal to `b`. Returns `a` on success.
-pub struct Equate<'a, 'tcx: 'a> {
- fields: CombineFields<'a, 'tcx>
+pub struct Equate<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fields: CombineFields<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Equate<'a, 'tcx> {
- pub fn new(fields: CombineFields<'a, 'tcx>) -> Equate<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Equate<'a, 'gcx, 'tcx> {
+ pub fn new(fields: CombineFields<'a, 'gcx, 'tcx>) -> Equate<'a, 'gcx, 'tcx> {
Equate { fields: fields }
}
}
}
-impl<'a, 'tcx> TypeRelation<'a,'tcx> for Equate<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Equate<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Equate" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.fields.tcx() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- _: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ _: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
self.relate(a, b)
}
}
_ => {
- combine::super_combine_tys(self.fields.infcx, self, a, b)?;
+ self.fields.infcx.super_combine_tys(self, a, b)?;
Ok(a)
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
self.fields.higher_ranked_sub(a, b)?;
self.fields.higher_ranked_sub(b, a)
use syntax::parse::token;
use syntax::ptr::P;
-impl<'tcx> TyCtxt<'tcx> {
- pub fn note_and_explain_region(&self,
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn note_and_explain_region(self,
err: &mut DiagnosticBuilder,
prefix: &str,
region: ty::Region,
}
}
- fn explain_span(tcx: &TyCtxt, heading: &str, span: Span)
- -> (String, Option<Span>) {
+ fn explain_span<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ heading: &str, span: Span)
+ -> (String, Option<Span>) {
let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo);
(format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()),
Some(span))
}
}
-pub trait ErrorReporting<'tcx> {
- fn report_region_errors(&self,
- errors: &Vec<RegionResolutionError<'tcx>>);
-
- fn process_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>)
- -> Option<Vec<RegionResolutionError<'tcx>>>;
-
- fn report_type_error(&self,
- trace: TypeTrace<'tcx>,
- terr: &TypeError<'tcx>)
- -> DiagnosticBuilder<'tcx>;
-
- fn check_and_note_conflicting_crates(&self,
- err: &mut DiagnosticBuilder,
- terr: &TypeError<'tcx>,
- sp: Span);
-
- fn report_and_explain_type_error(&self,
- trace: TypeTrace<'tcx>,
- terr: &TypeError<'tcx>)
- -> DiagnosticBuilder<'tcx>;
-
- fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<(String, String)>;
-
- fn expected_found_str<T: fmt::Display + Resolvable<'tcx> + TypeFoldable<'tcx>>(
- &self,
- exp_found: &ty::error::ExpectedFound<T>)
- -> Option<(String, String)>;
-
- fn report_concrete_failure(&self,
- origin: SubregionOrigin<'tcx>,
- sub: Region,
- sup: Region)
- -> DiagnosticBuilder<'tcx>;
-
- fn report_generic_bound_failure(&self,
- origin: SubregionOrigin<'tcx>,
- kind: GenericKind<'tcx>,
- sub: Region);
-
- fn report_sub_sup_conflict(&self,
- var_origin: RegionVariableOrigin,
- sub_origin: SubregionOrigin<'tcx>,
- sub_region: Region,
- sup_origin: SubregionOrigin<'tcx>,
- sup_region: Region);
-
- fn report_processed_errors(&self,
- origins: &[ProcessedErrorOrigin<'tcx>],
- same_regions: &[SameRegions]);
-
- fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]);
-}
-
-trait ErrorReportingHelpers<'tcx> {
- fn report_inference_failure(&self,
- var_origin: RegionVariableOrigin)
- -> DiagnosticBuilder<'tcx>;
-
- fn note_region_origin(&self,
- err: &mut DiagnosticBuilder,
- origin: &SubregionOrigin<'tcx>);
-
- fn give_expl_lifetime_param(&self,
- err: &mut DiagnosticBuilder,
- decl: &hir::FnDecl,
- unsafety: hir::Unsafety,
- constness: hir::Constness,
- name: ast::Name,
- opt_explicit_self: Option<&hir::ExplicitSelf_>,
- generics: &hir::Generics,
- span: Span);
-}
-
-impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> {
- fn report_region_errors(&self,
- errors: &Vec<RegionResolutionError<'tcx>>) {
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ pub fn report_region_errors(&self,
+ errors: &Vec<RegionResolutionError<'tcx>>) {
debug!("report_region_errors(): {} errors to start", errors.len());
// try to pre-process the errors, which will group some of them
}
}
- fn free_regions_from_same_fn(tcx: &TyCtxt,
- sub: Region,
- sup: Region)
- -> Option<FreeRegionsFromSameFn> {
+ fn free_regions_from_same_fn<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ sub: Region,
+ sup: Region)
+ -> Option<FreeRegionsFromSameFn> {
debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup);
let (scope_id, fr1, fr2) = match (sub, sup) {
(ReFree(fr1), ReFree(fr2)) => {
}
}
- fn report_and_explain_type_error(&self,
- trace: TypeTrace<'tcx>,
- terr: &TypeError<'tcx>)
- -> DiagnosticBuilder<'tcx> {
+ pub fn report_and_explain_type_error(&self,
+ trace: TypeTrace<'tcx>,
+ terr: &TypeError<'tcx>)
+ -> DiagnosticBuilder<'tcx> {
let span = trace.origin.span();
let mut err = self.report_type_error(trace, terr);
self.tcx.note_and_explain_type_err(&mut err, terr, span);
}
infer::Reborrow(span) => {
let mut err = struct_span_err!(self.tcx.sess, span, E0312,
- "lifetime of reference outlines \
+ "lifetime of reference outlives \
lifetime of borrowed content...");
self.tcx.note_and_explain_region(&mut err,
"...the reference is valid for ",
region_names: &'a HashSet<ast::Name>
}
-struct Rebuilder<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+struct Rebuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
fn_decl: &'a hir::FnDecl,
expl_self_opt: Option<&'a hir::ExplicitSelf_>,
generics: &'a hir::Generics,
Kept
}
-impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>,
+impl<'a, 'gcx, 'tcx> Rebuilder<'a, 'gcx, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
fn_decl: &'a hir::FnDecl,
expl_self_opt: Option<&'a hir::ExplicitSelf_>,
generics: &'a hir::Generics,
same_regions: &'a [SameRegions],
life_giver: &'a LifeGiver)
- -> Rebuilder<'a, 'tcx> {
+ -> Rebuilder<'a, 'gcx, 'tcx> {
Rebuilder {
tcx: tcx,
fn_decl: fn_decl,
}
}
-impl<'a, 'tcx> ErrorReportingHelpers<'tcx> for InferCtxt<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
fn give_expl_lifetime_param(&self,
err: &mut DiagnosticBuilder,
decl: &hir::FnDecl,
}
pub trait Resolvable<'tcx> {
- fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Self;
+ fn resolve<'a, 'gcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Self;
}
impl<'tcx> Resolvable<'tcx> for Ty<'tcx> {
- fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Ty<'tcx> {
+ fn resolve<'a, 'gcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
infcx.resolve_type_vars_if_possible(self)
}
}
impl<'tcx> Resolvable<'tcx> for ty::TraitRef<'tcx> {
- fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>)
- -> ty::TraitRef<'tcx> {
+ fn resolve<'a, 'gcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> ty::TraitRef<'tcx> {
infcx.resolve_type_vars_if_possible(self)
}
}
impl<'tcx> Resolvable<'tcx> for ty::PolyTraitRef<'tcx> {
- fn resolve<'a>(&self,
- infcx: &InferCtxt<'a, 'tcx>)
- -> ty::PolyTraitRef<'tcx>
+ fn resolve<'a, 'gcx>(&self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> ty::PolyTraitRef<'tcx>
{
infcx.resolve_type_vars_if_possible(self)
}
}
-fn lifetimes_in_scope(tcx: &TyCtxt,
- scope_id: ast::NodeId)
- -> Vec<hir::LifetimeDef> {
+fn lifetimes_in_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ scope_id: ast::NodeId)
+ -> Vec<hir::LifetimeDef> {
let mut taken = Vec::new();
let parent = tcx.map.get_parent(scope_id);
let method_id_opt = match tcx.map.find(parent) {
use super::InferCtxt;
use super::unify_key::ToType;
-pub struct TypeFreshener<'a, 'tcx:'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
freshen_count: u32,
freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
}
-impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeFreshener<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>)
+ -> TypeFreshener<'a, 'gcx, 'tcx> {
TypeFreshener {
infcx: infcx,
freshen_count: 0,
}
}
-impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
- fn tcx<'b>(&'b self) -> &'b TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
self.infcx.tcx
}
// except according to those terms.
use super::combine::CombineFields;
-use super::higher_ranked::HigherRankedRelations;
use super::InferCtxt;
use super::lattice::{self, LatticeDir};
use super::Subtype;
use traits::PredicateObligations;
/// "Greatest lower bound" (common subtype)
-pub struct Glb<'a, 'tcx: 'a> {
- fields: CombineFields<'a, 'tcx>
+pub struct Glb<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fields: CombineFields<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Glb<'a, 'tcx> {
- pub fn new(fields: CombineFields<'a, 'tcx>) -> Glb<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Glb<'a, 'gcx, 'tcx> {
+ pub fn new(fields: CombineFields<'a, 'gcx, 'tcx>) -> Glb<'a, 'gcx, 'tcx> {
Glb { fields: fields }
}
}
}
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Glb<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Glb<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Glb" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.fields.tcx() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ variance: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
match variance {
ty::Invariant => self.fields.equate().relate(a, b),
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
self.fields.higher_ranked_glb(a, b)
}
}
-impl<'a, 'tcx> LatticeDir<'a,'tcx> for Glb<'a, 'tcx> {
- fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> LatticeDir<'a, 'gcx, 'tcx> for Glb<'a, 'gcx, 'tcx> {
+ fn infcx(&self) -> &'a InferCtxt<'a, 'gcx, 'tcx> {
self.fields.infcx
}
use syntax::codemap::Span;
use util::nodemap::{FnvHashMap, FnvHashSet};
-pub trait HigherRankedRelations<'a,'tcx> {
- fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>;
-
- fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>;
-
- fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>;
-}
-
-trait InferCtxtExt {
- fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region>;
-
- fn region_vars_confined_to_snapshot(&self,
- snapshot: &CombinedSnapshot)
- -> Vec<ty::RegionVid>;
-}
-
-impl<'a,'tcx> HigherRankedRelations<'a,'tcx> for CombineFields<'a,'tcx> {
- fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>)
- -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>
+impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> {
+ pub fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>)
+ -> RelateResult<'tcx, Binder<T>>
+ where T: Relate<'tcx>
{
debug!("higher_ranked_sub(a={:?}, b={:?})",
a, b);
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
- match leak_check(self.infcx, &skol_map, snapshot) {
- Ok(()) => { }
- Err((skol_br, tainted_region)) => {
- if self.a_is_expected {
- debug!("Not as polymorphic!");
- return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
- tainted_region));
- } else {
- debug!("Overly polymorphic!");
- return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
- tainted_region));
- }
- }
- }
+ self.infcx.leak_check(!self.a_is_expected, &skol_map, snapshot)?;
- debug!("higher_ranked_sub: OK result={:?}",
- result);
+ debug!("higher_ranked_sub: OK result={:?}", result);
Ok(ty::Binder(result))
});
}
- fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>
+ pub fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>)
+ -> RelateResult<'tcx, Binder<T>>
+ where T: Relate<'tcx>
{
// Start a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
Ok(ty::Binder(result1))
});
- fn generalize_region(infcx: &InferCtxt,
- span: Span,
- snapshot: &CombinedSnapshot,
- debruijn: ty::DebruijnIndex,
- new_vars: &[ty::RegionVid],
- a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
- r0: ty::Region)
- -> ty::Region {
+ fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ span: Span,
+ snapshot: &CombinedSnapshot,
+ debruijn: ty::DebruijnIndex,
+ new_vars: &[ty::RegionVid],
+ a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+ r0: ty::Region)
+ -> ty::Region {
// Regions that pre-dated the LUB computation stay as they are.
if !is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
}
}
- fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'a,'tcx>
+ pub fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>)
+ -> RelateResult<'tcx, Binder<T>>
+ where T: Relate<'tcx>
{
debug!("higher_ranked_glb({:?}, {:?})",
a, b);
Ok(ty::Binder(result1))
});
- fn generalize_region(infcx: &InferCtxt,
- span: Span,
- snapshot: &CombinedSnapshot,
- debruijn: ty::DebruijnIndex,
- new_vars: &[ty::RegionVid],
- a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
- a_vars: &[ty::RegionVid],
- b_vars: &[ty::RegionVid],
- r0: ty::Region) -> ty::Region {
+ fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ span: Span,
+ snapshot: &CombinedSnapshot,
+ debruijn: ty::DebruijnIndex,
+ new_vars: &[ty::RegionVid],
+ a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+ a_vars: &[ty::RegionVid],
+ b_vars: &[ty::RegionVid],
+ r0: ty::Region) -> ty::Region {
if !is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
return r0;
}
}
-fn var_ids<'a, 'tcx>(fields: &CombineFields<'a, 'tcx>,
- map: &FnvHashMap<ty::BoundRegion, ty::Region>)
- -> Vec<ty::RegionVid> {
+fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>,
+ map: &FnvHashMap<ty::BoundRegion, ty::Region>)
+ -> Vec<ty::RegionVid> {
map.iter()
.map(|(_, r)| match *r {
ty::ReVar(r) => { r }
}
}
-fn fold_regions_in<'tcx, T, F>(tcx: &TyCtxt<'tcx>,
- unbound_value: &T,
- mut fldr: F)
- -> T
+fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ unbound_value: &T,
+ mut fldr: F)
+ -> T
where T: TypeFoldable<'tcx>,
F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
{
})
}
-impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
}
region_vars
}
-}
-pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- binder: &ty::Binder<T>,
- snapshot: &CombinedSnapshot)
- -> (T, SkolemizationMap)
- where T : TypeFoldable<'tcx>
-{
- /*!
- * Replace all regions bound by `binder` with skolemized regions and
- * return a map indicating which bound-region was replaced with what
- * skolemized region. This is the first step of checking subtyping
- * when higher-ranked things are involved. See `README.md` for more
- * details.
- */
-
- let (result, map) = infcx.tcx.replace_late_bound_regions(binder, |br| {
- infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
- });
-
- debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})",
- binder,
- result,
- map);
-
- (result, map)
-}
+ pub fn skolemize_late_bound_regions<T>(&self,
+ binder: &ty::Binder<T>,
+ snapshot: &CombinedSnapshot)
+ -> (T, SkolemizationMap)
+ where T : TypeFoldable<'tcx>
+ {
+ /*!
+ * Replace all regions bound by `binder` with skolemized regions and
+ * return a map indicating which bound-region was replaced with what
+ * skolemized region. This is the first step of checking subtyping
+ * when higher-ranked things are involved. See `README.md` for more
+ * details.
+ */
-pub fn leak_check<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- skol_map: &SkolemizationMap,
- snapshot: &CombinedSnapshot)
- -> Result<(),(ty::BoundRegion,ty::Region)>
-{
- /*!
- * Searches the region constriants created since `snapshot` was started
- * and checks to determine whether any of the skolemized regions created
- * in `skol_map` would "escape" -- meaning that they are related to
- * other regions in some way. If so, the higher-ranked subtyping doesn't
- * hold. See `README.md` for more details.
- */
-
- debug!("leak_check: skol_map={:?}",
- skol_map);
-
- let new_vars = infcx.region_vars_confined_to_snapshot(snapshot);
- for (&skol_br, &skol) in skol_map {
- let tainted = infcx.tainted_regions(snapshot, skol);
- for &tainted_region in &tainted {
- // Each skolemized should only be relatable to itself
- // or new variables:
- match tainted_region {
- ty::ReVar(vid) => {
- if new_vars.iter().any(|&x| x == vid) { continue; }
- }
- _ => {
- if tainted_region == skol { continue; }
- }
- };
+ let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| {
+ self.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
+ });
- debug!("{:?} (which replaced {:?}) is tainted by {:?}",
- skol,
- skol_br,
- tainted_region);
+ debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})",
+ binder,
+ result,
+ map);
- // A is not as polymorphic as B:
- return Err((skol_br, tainted_region));
- }
+ (result, map)
}
- Ok(())
-}
-/// This code converts from skolemized regions back to late-bound
-/// regions. It works by replacing each region in the taint set of a
-/// skolemized region with a bound-region. The bound region will be bound
-/// by the outer-most binder in `value`; the caller must ensure that there is
-/// such a binder and it is the right place.
-///
-/// This routine is only intended to be used when the leak-check has
-/// passed; currently, it's used in the trait matching code to create
-/// a set of nested obligations frmo an impl that matches against
-/// something higher-ranked. More details can be found in
-/// `librustc/middle/traits/README.md`.
-///
-/// As a brief example, consider the obligation `for<'a> Fn(&'a int)
-/// -> &'a int`, and the impl:
-///
-/// impl<A,R> Fn<A,R> for SomethingOrOther
-/// where A : Clone
-/// { ... }
-///
-/// Here we will have replaced `'a` with a skolemized region
-/// `'0`. This means that our substitution will be `{A=>&'0
-/// int, R=>&'0 int}`.
-///
-/// When we apply the substitution to the bounds, we will wind up with
-/// `&'0 int : Clone` as a predicate. As a last step, we then go and
-/// replace `'0` with a late-bound region `'a`. The depth is matched
-/// to the depth of the predicate, in this case 1, so that the final
-/// predicate is `for<'a> &'a int : Clone`.
-pub fn plug_leaks<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- skol_map: SkolemizationMap,
- snapshot: &CombinedSnapshot,
- value: &T)
- -> T
- where T : TypeFoldable<'tcx>
-{
- debug_assert!(leak_check(infcx, &skol_map, snapshot).is_ok());
-
- debug!("plug_leaks(skol_map={:?}, value={:?})",
- skol_map,
- value);
-
- // Compute a mapping from the "taint set" of each skolemized
- // region back to the `ty::BoundRegion` that it originally
- // represented. Because `leak_check` passed, we know that
- // these taint sets are mutually disjoint.
- let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
- skol_map
- .into_iter()
- .flat_map(|(skol_br, skol)| {
- infcx.tainted_regions(snapshot, skol)
- .into_iter()
- .map(move |tainted_region| (tainted_region, skol_br))
- })
- .collect();
-
- debug!("plug_leaks: inv_skol_map={:?}",
- inv_skol_map);
-
- // Remove any instantiated type variables from `value`; those can hide
- // references to regions from the `fold_regions` code below.
- let value = infcx.resolve_type_vars_if_possible(value);
-
- // Map any skolemization byproducts back to a late-bound
- // region. Put that late-bound region at whatever the outermost
- // binder is that we encountered in `value`. The caller is
- // responsible for ensuring that (a) `value` contains at least one
- // binder and (b) that binder is the one we want to use.
- let result = infcx.tcx.fold_regions(&value, &mut false, |r, current_depth| {
- match inv_skol_map.get(&r) {
- None => r,
- Some(br) => {
- // It is the responsibility of the caller to ensure
- // that each skolemized region appears within a
- // binder. In practice, this routine is only used by
- // trait checking, and all of the skolemized regions
- // appear inside predicates, which always have
- // binders, so this assert is satisfied.
- assert!(current_depth > 1);
-
- ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
+ pub fn leak_check(&self,
+ overly_polymorphic: bool,
+ skol_map: &SkolemizationMap,
+ snapshot: &CombinedSnapshot)
+ -> RelateResult<'tcx, ()>
+ {
+ /*!
+ * Searches the region constraints created since `snapshot` was started
+ * and checks to determine whether any of the skolemized regions created
+ * in `skol_map` would "escape" -- meaning that they are related to
+ * other regions in some way. If so, the higher-ranked subtyping doesn't
+ * hold. See `README.md` for more details.
+ */
+
+ debug!("leak_check: skol_map={:?}",
+ skol_map);
+
+ let new_vars = self.region_vars_confined_to_snapshot(snapshot);
+ for (&skol_br, &skol) in skol_map {
+ let tainted = self.tainted_regions(snapshot, skol);
+ for &tainted_region in &tainted {
+ // Each skolemized should only be relatable to itself
+ // or new variables:
+ match tainted_region {
+ ty::ReVar(vid) => {
+ if new_vars.iter().any(|&x| x == vid) { continue; }
+ }
+ _ => {
+ if tainted_region == skol { continue; }
+ }
+ };
+
+ debug!("{:?} (which replaced {:?}) is tainted by {:?}",
+ skol,
+ skol_br,
+ tainted_region);
+
+ if overly_polymorphic {
+ debug!("Overly polymorphic!");
+ return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
+ tainted_region));
+ } else {
+ debug!("Not as polymorphic!");
+ return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
+ tainted_region));
+ }
}
}
- });
+ Ok(())
+ }
+
+ /// This code converts from skolemized regions back to late-bound
+ /// regions. It works by replacing each region in the taint set of a
+ /// skolemized region with a bound-region. The bound region will be bound
+ /// by the outer-most binder in `value`; the caller must ensure that there is
+ /// such a binder and it is the right place.
+ ///
+ /// This routine is only intended to be used when the leak-check has
+ /// passed; currently, it's used in the trait matching code to create
+ /// a set of nested obligations from an impl that matches against
+ /// something higher-ranked. More details can be found in
+ /// `librustc/middle/traits/README.md`.
+ ///
+ /// As a brief example, consider the obligation `for<'a> Fn(&'a int)
+ /// -> &'a int`, and the impl:
+ ///
+ /// impl<A,R> Fn<A,R> for SomethingOrOther
+ /// where A : Clone
+ /// { ... }
+ ///
+ /// Here we will have replaced `'a` with a skolemized region
+ /// `'0`. This means that our substitution will be `{A=>&'0
+ /// int, R=>&'0 int}`.
+ ///
+ /// When we apply the substitution to the bounds, we will wind up with
+ /// `&'0 int : Clone` as a predicate. As a last step, we then go and
+ /// replace `'0` with a late-bound region `'a`. The depth is matched
+ /// to the depth of the predicate, in this case 1, so that the final
+ /// predicate is `for<'a> &'a int : Clone`.
+ pub fn plug_leaks<T>(&self,
+ skol_map: SkolemizationMap,
+ snapshot: &CombinedSnapshot,
+ value: &T) -> T
+ where T : TypeFoldable<'tcx>
+ {
+ debug_assert!(self.leak_check(false, &skol_map, snapshot).is_ok());
+
+ debug!("plug_leaks(skol_map={:?}, value={:?})",
+ skol_map,
+ value);
+
+ // Compute a mapping from the "taint set" of each skolemized
+ // region back to the `ty::BoundRegion` that it originally
+ // represented. Because `leak_check` passed, we know that
+ // these taint sets are mutually disjoint.
+ let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
+ skol_map
+ .into_iter()
+ .flat_map(|(skol_br, skol)| {
+ self.tainted_regions(snapshot, skol)
+ .into_iter()
+ .map(move |tainted_region| (tainted_region, skol_br))
+ })
+ .collect();
+
+ debug!("plug_leaks: inv_skol_map={:?}",
+ inv_skol_map);
+
+ // Remove any instantiated type variables from `value`; those can hide
+ // references to regions from the `fold_regions` code below.
+ let value = self.resolve_type_vars_if_possible(value);
+
+ // Map any skolemization byproducts back to a late-bound
+ // region. Put that late-bound region at whatever the outermost
+ // binder is that we encountered in `value`. The caller is
+ // responsible for ensuring that (a) `value` contains at least one
+ // binder and (b) that binder is the one we want to use.
+ let result = self.tcx.fold_regions(&value, &mut false, |r, current_depth| {
+ match inv_skol_map.get(&r) {
+ None => r,
+ Some(br) => {
+ // It is the responsibility of the caller to ensure
+ // that each skolemized region appears within a
+ // binder. In practice, this routine is only used by
+ // trait checking, and all of the skolemized regions
+ // appear inside predicates, which always have
+ // binders, so this assert is satisfied.
+ assert!(current_depth > 1);
+
+ ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
+ }
+ }
+ });
- debug!("plug_leaks: result={:?}",
- result);
+ debug!("plug_leaks: result={:?}",
+ result);
- result
+ result
+ }
}
//! over a `LatticeValue`, which is a value defined with respect to
//! a lattice.
-use super::combine;
use super::InferCtxt;
use ty::TyVar;
use ty::{self, Ty};
use ty::relate::{RelateResult, TypeRelation};
-pub trait LatticeDir<'f,'tcx> : TypeRelation<'f,'tcx> {
- fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>;
+pub trait LatticeDir<'f, 'gcx: 'f+'tcx, 'tcx: 'f> : TypeRelation<'f, 'gcx, 'tcx> {
+ fn infcx(&self) -> &'f InferCtxt<'f, 'gcx, 'tcx>;
// Relates the type `v` to `a` and `b` such that `v` represents
// the LUB/GLB of `a` and `b` as appropriate.
fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
}
-pub fn super_lattice_tys<'a,'tcx,L:LatticeDir<'a,'tcx>>(this: &mut L,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> RelateResult<'tcx, Ty<'tcx>>
- where 'tcx: 'a
+pub fn super_lattice_tys<'a, 'gcx, 'tcx, L>(this: &mut L,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ where L: LatticeDir<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
debug!("{}.lattice_tys({:?}, {:?})",
this.tag(),
}
_ => {
- combine::super_combine_tys(this.infcx(), this, a, b)
+ infcx.super_combine_tys(this, a, b)
}
}
}
// except according to those terms.
use super::combine::CombineFields;
-use super::higher_ranked::HigherRankedRelations;
use super::InferCtxt;
use super::lattice::{self, LatticeDir};
use super::Subtype;
use traits::PredicateObligations;
/// "Least upper bound" (common supertype)
-pub struct Lub<'a, 'tcx: 'a> {
- fields: CombineFields<'a, 'tcx>
+pub struct Lub<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fields: CombineFields<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Lub<'a, 'tcx> {
- pub fn new(fields: CombineFields<'a, 'tcx>) -> Lub<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Lub<'a, 'gcx, 'tcx> {
+ pub fn new(fields: CombineFields<'a, 'gcx, 'tcx>) -> Lub<'a, 'gcx, 'tcx> {
Lub { fields: fields }
}
}
}
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Lub<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Lub<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Lub" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.tcx() }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.fields.tcx() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ variance: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
match variance {
ty::Invariant => self.fields.equate().relate(a, b),
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
self.fields.higher_ranked_lub(a, b)
}
}
-impl<'a, 'tcx> LatticeDir<'a,'tcx> for Lub<'a, 'tcx> {
- fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> LatticeDir<'a, 'gcx, 'tcx> for Lub<'a, 'gcx, 'tcx> {
+ fn infcx(&self) -> &'a InferCtxt<'a, 'gcx, 'tcx> {
self.fields.infcx
}
use middle::mem_categorization as mc;
use middle::mem_categorization::McResult;
use middle::region::CodeExtent;
+use mir::tcx::LvalueTy;
use ty::subst;
use ty::subst::Substs;
use ty::subst::Subst;
use ty::relate::{Relate, RelateResult, TypeRelation};
use traits::{self, PredicateObligations, ProjectionMode};
use rustc_data_structures::unify::{self, UnificationTable};
-use std::cell::{Cell, RefCell, Ref};
+use std::cell::{Cell, RefCell, Ref, RefMut};
use std::fmt;
use syntax::ast;
use syntax::codemap;
use self::combine::CombineFields;
use self::region_inference::{RegionVarBindings, RegionSnapshot};
-use self::error_reporting::ErrorReporting;
use self::unify_key::ToType;
pub mod bivariate;
pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
pub type FixupResult<T> = Result<T, FixupError>; // "fixup result"
-pub struct InferCtxt<'a, 'tcx: 'a> {
- pub tcx: &'a TyCtxt<'tcx>,
+/// A version of &ty::Tables which can be global or local.
+/// Only the local version supports borrow_mut.
+#[derive(Copy, Clone)]
+pub enum InferTables<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ Global(&'a RefCell<ty::Tables<'gcx>>),
+ Local(&'a RefCell<ty::Tables<'tcx>>)
+}
- pub tables: &'a RefCell<ty::Tables<'tcx>>,
+impl<'a, 'gcx, 'tcx> InferTables<'a, 'gcx, 'tcx> {
+ pub fn borrow(self) -> Ref<'a, ty::Tables<'tcx>> {
+ match self {
+ InferTables::Global(tables) => tables.borrow(),
+ InferTables::Local(tables) => tables.borrow()
+ }
+ }
+
+ pub fn borrow_mut(self) -> RefMut<'a, ty::Tables<'tcx>> {
+ match self {
+ InferTables::Global(_) => {
+ bug!("InferTables: infcx.tables.borrow_mut() outside of type-checking");
+ }
+ InferTables::Local(tables) => tables.borrow_mut()
+ }
+ }
+}
+
+pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub tcx: TyCtxt<'a, 'gcx, 'tcx>,
+
+ pub tables: InferTables<'a, 'gcx, 'tcx>,
// We instantiate UnificationTable with bounds<Ty> because the
// types that might instantiate a general type variable have an
float_unification_table: RefCell<UnificationTable<ty::FloatVid>>,
// For region variables.
- region_vars: RegionVarBindings<'a, 'tcx>,
+ region_vars: RegionVarBindings<'a, 'gcx, 'tcx>,
+
+ pub parameter_environment: ty::ParameterEnvironment<'gcx>,
- pub parameter_environment: ty::ParameterEnvironment<'a, 'tcx>,
+ /// Caches the results of trait selection. This cache is used
+ /// for things that have to do with the parameters in scope.
+ pub selection_cache: traits::SelectionCache<'tcx>,
+
+ /// Caches the results of trait evaluation.
+ pub evaluation_cache: traits::EvaluationCache<'tcx>,
// the set of predicates on which errors have been reported, to
// avoid reporting the same error twice.
UnresolvedTy(TyVid)
}
-pub fn fixup_err_to_string(f: FixupError) -> String {
- use self::FixupError::*;
-
- match f {
- UnresolvedIntTy(_) => {
- "cannot determine the type of this integer; add a suffix to \
- specify the type explicitly".to_string()
- }
- UnresolvedFloatTy(_) => {
- "cannot determine the type of this number; add a suffix to specify \
- the type explicitly".to_string()
- }
- UnresolvedTy(_) => "unconstrained type".to_string(),
- }
-}
+impl fmt::Display for FixupError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ use self::FixupError::*;
-pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a TyCtxt<'tcx>,
- tables: &'a RefCell<ty::Tables<'tcx>>,
- param_env: Option<ty::ParameterEnvironment<'a, 'tcx>>,
- projection_mode: ProjectionMode)
- -> InferCtxt<'a, 'tcx> {
- InferCtxt {
- tcx: tcx,
- tables: tables,
- type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
- int_unification_table: RefCell::new(UnificationTable::new()),
- float_unification_table: RefCell::new(UnificationTable::new()),
- region_vars: RegionVarBindings::new(tcx),
- parameter_environment: param_env.unwrap_or(tcx.empty_parameter_environment()),
- reported_trait_errors: RefCell::new(FnvHashSet()),
- normalize: false,
- projection_mode: projection_mode,
- tainted_by_errors_flag: Cell::new(false),
- err_count_on_creation: tcx.sess.err_count()
+ match *self {
+ UnresolvedIntTy(_) => {
+ write!(f, "cannot determine the type of this integer; \
+ add a suffix to specify the type explicitly")
+ }
+ UnresolvedFloatTy(_) => {
+ write!(f, "cannot determine the type of this number; \
+ add a suffix to specify the type explicitly")
+ }
+ UnresolvedTy(_) => write!(f, "unconstrained type")
+ }
}
}
-pub fn normalizing_infer_ctxt<'a, 'tcx>(tcx: &'a TyCtxt<'tcx>,
- tables: &'a RefCell<ty::Tables<'tcx>>,
- projection_mode: ProjectionMode)
- -> InferCtxt<'a, 'tcx> {
- let mut infcx = new_infer_ctxt(tcx, tables, None, projection_mode);
- infcx.normalize = true;
- infcx
-}
-
-pub fn mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- a_is_expected: bool,
- origin: TypeOrigin,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> InferResult<'tcx, ()>
-{
- debug!("mk_subty({:?} <: {:?})", a, b);
- cx.sub_types(a_is_expected, origin, a, b)
-}
-
-pub fn can_mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>)
- -> UnitResult<'tcx>
-{
- debug!("can_mk_subty({:?} <: {:?})", a, b);
- cx.probe(|_| {
- let trace = TypeTrace {
- origin: TypeOrigin::Misc(codemap::DUMMY_SP),
- values: Types(expected_found(true, a, b))
- };
- cx.sub(true, trace, &a, &b).map(|_| ())
- })
-}
-
-pub fn can_mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>)
- -> UnitResult<'tcx>
-{
- cx.can_equate(&a, &b)
+/// Helper type of a temporary returned by tcx.infer_ctxt(...).
+/// Necessary because we can't write the following bound:
+/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(InferCtxt<'b, 'gcx, 'tcx>).
+pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ global_tcx: TyCtxt<'a, 'gcx, 'gcx>,
+ arenas: ty::CtxtArenas<'tcx>,
+ tables: Option<RefCell<ty::Tables<'tcx>>>,
+ param_env: Option<ty::ParameterEnvironment<'gcx>>,
+ projection_mode: ProjectionMode,
+ normalize: bool
}
-pub fn mk_subr<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- origin: SubregionOrigin<'tcx>,
- a: ty::Region,
- b: ty::Region) {
- debug!("mk_subr({:?} <: {:?})", a, b);
- let snapshot = cx.region_vars.start_snapshot();
- cx.region_vars.make_subregion(origin, a, b);
- cx.region_vars.commit(snapshot);
-}
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> {
+ pub fn infer_ctxt(self,
+ tables: Option<ty::Tables<'tcx>>,
+ param_env: Option<ty::ParameterEnvironment<'gcx>>,
+ projection_mode: ProjectionMode)
+ -> InferCtxtBuilder<'a, 'gcx, 'tcx> {
+ InferCtxtBuilder {
+ global_tcx: self,
+ arenas: ty::CtxtArenas::new(),
+ tables: tables.map(RefCell::new),
+ param_env: param_env,
+ projection_mode: projection_mode,
+ normalize: false
+ }
+ }
-pub fn mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- a_is_expected: bool,
- origin: TypeOrigin,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> InferResult<'tcx, ()>
-{
- debug!("mk_eqty({:?} <: {:?})", a, b);
- cx.eq_types(a_is_expected, origin, a, b)
-}
+ pub fn normalizing_infer_ctxt(self, projection_mode: ProjectionMode)
+ -> InferCtxtBuilder<'a, 'gcx, 'tcx> {
+ InferCtxtBuilder {
+ global_tcx: self,
+ arenas: ty::CtxtArenas::new(),
+ tables: None,
+ param_env: None,
+ projection_mode: projection_mode,
+ normalize: false
+ }
+ }
-pub fn mk_eq_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- a_is_expected: bool,
- origin: TypeOrigin,
- a: ty::TraitRef<'tcx>,
- b: ty::TraitRef<'tcx>)
- -> InferResult<'tcx, ()>
-{
- debug!("mk_eq_trait_refs({:?} = {:?})", a, b);
- cx.eq_trait_refs(a_is_expected, origin, a, b)
+ /// Fake InferCtxt with the global tcx. Used by pre-MIR borrowck
+ /// for MemCategorizationContext/ExprUseVisitor.
+ /// If any inference functionality is used, ICEs will occur.
+ pub fn borrowck_fake_infer_ctxt(self, param_env: ty::ParameterEnvironment<'gcx>)
+ -> InferCtxt<'a, 'gcx, 'gcx> {
+ InferCtxt {
+ tcx: self,
+ tables: InferTables::Global(&self.tables),
+ type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
+ int_unification_table: RefCell::new(UnificationTable::new()),
+ float_unification_table: RefCell::new(UnificationTable::new()),
+ region_vars: RegionVarBindings::new(self),
+ parameter_environment: param_env,
+ selection_cache: traits::SelectionCache::new(),
+ evaluation_cache: traits::EvaluationCache::new(),
+ reported_trait_errors: RefCell::new(FnvHashSet()),
+ normalize: false,
+ projection_mode: ProjectionMode::AnyFinal,
+ tainted_by_errors_flag: Cell::new(false),
+ err_count_on_creation: self.sess.err_count()
+ }
+ }
}
-pub fn mk_sub_poly_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- a_is_expected: bool,
- origin: TypeOrigin,
- a: ty::PolyTraitRef<'tcx>,
- b: ty::PolyTraitRef<'tcx>)
- -> InferResult<'tcx, ()>
-{
- debug!("mk_sub_poly_trait_refs({:?} <: {:?})", a, b);
- cx.sub_poly_trait_refs(a_is_expected, origin, a, b)
+impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> {
+ pub fn enter<F, R>(&'tcx mut self, f: F) -> R
+ where F: for<'b> FnOnce(InferCtxt<'b, 'gcx, 'tcx>) -> R
+ {
+ let InferCtxtBuilder {
+ global_tcx,
+ ref arenas,
+ ref tables,
+ ref mut param_env,
+ projection_mode,
+ normalize
+ } = *self;
+ let tables = if let Some(ref tables) = *tables {
+ InferTables::Local(tables)
+ } else {
+ InferTables::Global(&global_tcx.tables)
+ };
+ let param_env = param_env.take().unwrap_or_else(|| {
+ global_tcx.empty_parameter_environment()
+ });
+ global_tcx.enter_local(arenas, |tcx| f(InferCtxt {
+ tcx: tcx,
+ tables: tables,
+ type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
+ int_unification_table: RefCell::new(UnificationTable::new()),
+ float_unification_table: RefCell::new(UnificationTable::new()),
+ region_vars: RegionVarBindings::new(tcx),
+ parameter_environment: param_env,
+ selection_cache: traits::SelectionCache::new(),
+ evaluation_cache: traits::EvaluationCache::new(),
+ reported_trait_errors: RefCell::new(FnvHashSet()),
+ normalize: normalize,
+ projection_mode: projection_mode,
+ tainted_by_errors_flag: Cell::new(false),
+ err_count_on_creation: tcx.sess.err_count()
+ }))
+ }
}
-pub fn mk_eq_impl_headers<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
- a_is_expected: bool,
- origin: TypeOrigin,
- a: &ty::ImplHeader<'tcx>,
- b: &ty::ImplHeader<'tcx>)
- -> InferResult<'tcx, ()>
-{
- debug!("mk_eq_impl_header({:?} = {:?})", a, b);
- match (a.trait_ref, b.trait_ref) {
- (Some(a_ref), Some(b_ref)) => mk_eq_trait_refs(cx, a_is_expected, origin, a_ref, b_ref),
- (None, None) => mk_eqty(cx, a_is_expected, origin, a.self_ty, b.self_ty),
- _ => bug!("mk_eq_impl_headers given mismatched impl kinds"),
+impl<T> ExpectedFound<T> {
+ fn new(a_is_expected: bool, a: T, b: T) -> Self {
+ if a_is_expected {
+ ExpectedFound {expected: a, found: b}
+ } else {
+ ExpectedFound {expected: b, found: a}
+ }
}
}
-fn expected_found<T>(a_is_expected: bool,
- a: T,
- b: T)
- -> ExpectedFound<T>
-{
- if a_is_expected {
- ExpectedFound {expected: a, found: b}
- } else {
- ExpectedFound {expected: b, found: a}
+impl<'tcx, T> InferOk<'tcx, T> {
+ fn unit(self) -> InferOk<'tcx, ()> {
+ InferOk { value: (), obligations: self.obligations }
}
}
region_vars_snapshot: RegionSnapshot,
}
-// NOTE: Callable from trans only!
-pub fn normalize_associated_type<'tcx,T>(tcx: &TyCtxt<'tcx>, value: &T) -> T
- where T : TypeFoldable<'tcx>
-{
- debug!("normalize_associated_type(t={:?})", value);
+/// Helper trait for shortening the lifetimes inside a
+/// value for post-type-checking normalization.
+pub trait TransNormalize<'gcx>: TypeFoldable<'gcx> {
+ fn trans_normalize<'a, 'tcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Self;
+}
- let value = tcx.erase_regions(value);
+macro_rules! items { ($($item:item)+) => ($($item)+) }
+macro_rules! impl_trans_normalize {
+ ($lt_gcx:tt, $($ty:ty),+) => {
+ items!($(impl<$lt_gcx> TransNormalize<$lt_gcx> for $ty {
+ fn trans_normalize<'a, 'tcx>(&self,
+ infcx: &InferCtxt<'a, $lt_gcx, 'tcx>)
+ -> Self {
+ infcx.normalize_projections_in(self)
+ }
+ })+);
+ }
+}
- if !value.has_projection_types() {
- return value;
+impl_trans_normalize!('gcx,
+ Ty<'gcx>,
+ &'gcx Substs<'gcx>,
+ ty::FnSig<'gcx>,
+ ty::FnOutput<'gcx>,
+ &'gcx ty::BareFnTy<'gcx>,
+ ty::ClosureSubsts<'gcx>,
+ ty::PolyTraitRef<'gcx>
+);
+
+impl<'gcx> TransNormalize<'gcx> for LvalueTy<'gcx> {
+ fn trans_normalize<'a, 'tcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Self {
+ match *self {
+ LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.trans_normalize(infcx) },
+ LvalueTy::Downcast { adt_def, substs, variant_index } => {
+ LvalueTy::Downcast {
+ adt_def: adt_def,
+ substs: substs.trans_normalize(infcx),
+ variant_index: variant_index
+ }
+ }
+ }
}
+}
- let infcx = new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::Any);
- let mut selcx = traits::SelectionContext::new(&infcx);
- let cause = traits::ObligationCause::dummy();
- let traits::Normalized { value: result, obligations } =
- traits::normalize(&mut selcx, cause, &value);
+// NOTE: Callable from trans only!
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ pub fn normalize_associated_type<T>(self, value: &T) -> T
+ where T: TransNormalize<'tcx>
+ {
+ debug!("normalize_associated_type(t={:?})", value);
- debug!("normalize_associated_type: result={:?} obligations={:?}",
- result,
- obligations);
+ let value = self.erase_regions(value);
- let mut fulfill_cx = traits::FulfillmentContext::new();
+ if !value.has_projection_types() {
+ return value;
+ }
- for obligation in obligations {
- fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ self.infer_ctxt(None, None, ProjectionMode::Any).enter(|infcx| {
+ value.trans_normalize(&infcx)
+ })
}
-
- drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result)
}
-pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span,
- infcx: &InferCtxt<'a,'tcx>,
- fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
- result: &T)
- -> T
- where T : TypeFoldable<'tcx>
-{
- match drain_fulfillment_cx(infcx, fulfill_cx, result) {
- Ok(v) => v,
- Err(errors) => {
- span_bug!(
- span,
- "Encountered errors `{:?}` fulfilling during trans",
- errors);
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ fn normalize_projections_in<T>(&self, value: &T) -> T::Lifted
+ where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
+ {
+ let mut selcx = traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::dummy();
+ let traits::Normalized { value: result, obligations } =
+ traits::normalize(&mut selcx, cause, value);
+
+ debug!("normalize_projections_in: result={:?} obligations={:?}",
+ result, obligations);
+
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(self, obligation);
}
+
+ self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
}
-}
-/// Finishes processes any obligations that remain in the fulfillment
-/// context, and then "freshens" and returns `result`. This is
-/// primarily used during normalization and other cases where
-/// processing the obligations in `fulfill_cx` may cause type
-/// inference variables that appear in `result` to be unified, and
-/// hence we need to process those obligations to get the complete
-/// picture of the type.
-pub fn drain_fulfillment_cx<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
- result: &T)
- -> Result<T,Vec<traits::FulfillmentError<'tcx>>>
- where T : TypeFoldable<'tcx>
-{
- debug!("drain_fulfillment_cx(result={:?})",
- result);
-
- // In principle, we only need to do this so long as `result`
- // contains unbound type parameters. It could be a slight
- // optimization to stop iterating early.
- match fulfill_cx.select_all_or_error(infcx) {
- Ok(()) => { }
- Err(errors) => {
- return Err(errors);
+ pub fn drain_fulfillment_cx_or_panic<T>(&self,
+ span: Span,
+ fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+ result: &T)
+ -> T::Lifted
+ where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
+ {
+ let when = "resolving bounds after type-checking";
+ let v = match self.drain_fulfillment_cx(fulfill_cx, result) {
+ Ok(v) => v,
+ Err(errors) => {
+ span_bug!(span, "Encountered errors `{:?}` {}", errors, when);
+ }
+ };
+
+ match self.tcx.lift_to_global(&v) {
+ Some(v) => v,
+ None => {
+ span_bug!(span, "Uninferred types/regions in `{:?}` {}", v, when);
+ }
}
}
- let result = infcx.resolve_type_vars_if_possible(result);
- Ok(infcx.tcx.erase_regions(&result))
-}
+ /// Finishes processing any obligations that remain in the fulfillment
+ /// context, and then "freshens" and returns `result`. This is
+ /// primarily used during normalization and other cases where
+ /// processing the obligations in `fulfill_cx` may cause type
+ /// inference variables that appear in `result` to be unified, and
+ /// hence we need to process those obligations to get the complete
+ /// picture of the type.
+ pub fn drain_fulfillment_cx<T>(&self,
+ fulfill_cx: &mut traits::FulfillmentContext<'tcx>,
+ result: &T)
+ -> Result<T,Vec<traits::FulfillmentError<'tcx>>>
+ where T : TypeFoldable<'tcx>
+ {
+ debug!("drain_fulfillment_cx(result={:?})",
+ result);
-impl<'tcx, T> InferOk<'tcx, T> {
- fn unit(self) -> InferOk<'tcx, ()> {
- InferOk { value: (), obligations: self.obligations }
+ // In principle, we only need to do this so long as `result`
+ // contains unbound type parameters. It could be a slight
+ // optimization to stop iterating early.
+ fulfill_cx.select_all_or_error(self)?;
+
+ let result = self.resolve_type_vars_if_possible(result);
+ Ok(self.tcx.erase_regions(&result))
}
-}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn projection_mode(&self) -> ProjectionMode {
self.projection_mode
}
}
}
- pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> {
+ pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'gcx, 'tcx> {
freshen::TypeFreshener::new(self)
}
}
fn combine_fields(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>)
- -> CombineFields<'a, 'tcx>
- {
+ -> CombineFields<'a, 'gcx, 'tcx> {
CombineFields {
infcx: self,
a_is_expected: a_is_expected,
pub fn equate<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
-> InferResult<'tcx, T>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
let mut equate = self.combine_fields(a_is_expected, trace).equate();
let result = equate.relate(a, b);
pub fn sub<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
-> InferResult<'tcx, T>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
let mut sub = self.combine_fields(a_is_expected, trace).sub();
let result = sub.relate(a, b);
pub fn lub<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
-> InferResult<'tcx, T>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
let mut lub = self.combine_fields(a_is_expected, trace).lub();
let result = lub.relate(a, b);
pub fn glb<T>(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T)
-> InferResult<'tcx, T>
- where T: Relate<'a, 'tcx>
+ where T: Relate<'tcx>
{
let mut glb = self.combine_fields(a_is_expected, trace).glb();
let result = glb.relate(a, b);
})
}
+ pub fn can_sub_types(&self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>)
+ -> UnitResult<'tcx>
+ {
+ self.probe(|_| {
+ let origin = TypeOrigin::Misc(codemap::DUMMY_SP);
+ let trace = TypeTrace::types(origin, true, a, b);
+ self.sub(true, trace, &a, &b).map(|_| ())
+ })
+ }
+
pub fn eq_types(&self,
a_is_expected: bool,
origin: TypeOrigin,
b: ty::TraitRef<'tcx>)
-> InferResult<'tcx, ()>
{
- debug!("eq_trait_refs({:?} <: {:?})",
- a,
- b);
+ debug!("eq_trait_refs({:?} = {:?})", a, b);
self.commit_if_ok(|_| {
let trace = TypeTrace {
origin: origin,
- values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
+ values: TraitRefs(ExpectedFound::new(a_is_expected, a, b))
};
self.equate(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
})
}
+ pub fn eq_impl_headers(&self,
+ a_is_expected: bool,
+ origin: TypeOrigin,
+ a: &ty::ImplHeader<'tcx>,
+ b: &ty::ImplHeader<'tcx>)
+ -> InferResult<'tcx, ()>
+ {
+ debug!("eq_impl_header({:?} = {:?})", a, b);
+ match (a.trait_ref, b.trait_ref) {
+ (Some(a_ref), Some(b_ref)) => self.eq_trait_refs(a_is_expected, origin, a_ref, b_ref),
+ (None, None) => self.eq_types(a_is_expected, origin, a.self_ty, b.self_ty),
+ _ => bug!("mk_eq_impl_headers given mismatched impl kinds"),
+ }
+ }
+
pub fn sub_poly_trait_refs(&self,
a_is_expected: bool,
origin: TypeOrigin,
b: ty::PolyTraitRef<'tcx>)
-> InferResult<'tcx, ()>
{
- debug!("sub_poly_trait_refs({:?} <: {:?})",
- a,
- b);
+ debug!("sub_poly_trait_refs({:?} <: {:?})", a, b);
self.commit_if_ok(|_| {
let trace = TypeTrace {
origin: origin,
- values: PolyTraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
+ values: PolyTraitRefs(ExpectedFound::new(a_is_expected, a, b))
};
self.sub(a_is_expected, trace, &a, &b).map(|ok| ok.unit())
})
}
- pub fn skolemize_late_bound_regions<T>(&self,
- value: &ty::Binder<T>,
- snapshot: &CombinedSnapshot)
- -> (T, SkolemizationMap)
- where T : TypeFoldable<'tcx>
- {
- /*! See `higher_ranked::skolemize_late_bound_regions` */
-
- higher_ranked::skolemize_late_bound_regions(self, value, snapshot)
- }
-
- pub fn leak_check(&self,
- skol_map: &SkolemizationMap,
- snapshot: &CombinedSnapshot)
- -> UnitResult<'tcx>
- {
- /*! See `higher_ranked::leak_check` */
-
- match higher_ranked::leak_check(self, skol_map, snapshot) {
- Ok(()) => Ok(()),
- Err((br, r)) => Err(TypeError::RegionsInsufficientlyPolymorphic(br, r))
- }
- }
-
- pub fn plug_leaks<T>(&self,
- skol_map: SkolemizationMap,
- snapshot: &CombinedSnapshot,
- value: &T)
- -> T
- where T : TypeFoldable<'tcx>
- {
- /*! See `higher_ranked::plug_leaks` */
-
- higher_ranked::plug_leaks(self, skol_map, snapshot, value)
+ pub fn sub_regions(&self,
+ origin: SubregionOrigin<'tcx>,
+ a: ty::Region,
+ b: ty::Region) {
+ debug!("sub_regions({:?} <: {:?})", a, b);
+ self.region_vars.make_subregion(origin, a, b);
}
pub fn equality_predicate(&self,
let (ty::EquatePredicate(a, b), skol_map) =
self.skolemize_late_bound_regions(predicate, snapshot);
let origin = TypeOrigin::EquatePredicate(span);
- let eqty_ok = mk_eqty(self, false, origin, a, b)?;
- self.leak_check(&skol_map, snapshot).map(|_| eqty_ok.unit())
+ let eqty_ok = self.eq_types(false, origin, a, b)?;
+ self.leak_check(false, &skol_map, snapshot).map(|_| eqty_ok.unit())
})
}
let (ty::OutlivesPredicate(r_a, r_b), skol_map) =
self.skolemize_late_bound_regions(predicate, snapshot);
let origin = RelateRegionParamBound(span);
- let () = mk_subr(self, origin, r_b, r_a); // `b : a` ==> `a <= b`
- self.leak_check(&skol_map, snapshot)
+ self.sub_regions(origin, r_b, r_a); // `b : a` ==> `a <= b`
+ self.leak_check(false, &skol_map, snapshot)
})
}
pub fn fresh_substs_for_generics(&self,
span: Span,
generics: &ty::Generics<'tcx>)
- -> subst::Substs<'tcx>
+ -> &'tcx subst::Substs<'tcx>
{
let type_params = subst::VecPerParamSpace::empty();
generics.types.get_slice(*space));
}
- return substs;
+ self.tcx.mk_substs(substs)
}
/// Given a set of generics defined on a trait, returns a substitution mapping each output
self.region_vars.verify_generic_bound(origin, kind, a, bound);
}
- pub fn can_equate<'b,T>(&'b self, a: &T, b: &T) -> UnitResult<'tcx>
- where T: Relate<'b,'tcx> + fmt::Debug
+ pub fn can_equate<T>(&self, a: &T, b: &T) -> UnitResult<'tcx>
+ where T: Relate<'tcx> + fmt::Debug
{
debug!("can_equate({:?}, {:?})", a, b);
self.probe(|_| {
// anyhow. We should make this typetrace stuff more
// generic so we don't have to do anything quite this
// terrible.
- let e = self.tcx.types.err;
- let trace = TypeTrace {
- origin: TypeOrigin::Misc(codemap::DUMMY_SP),
- values: Types(expected_found(true, e, e))
- };
- self.equate(true, trace, a, b)
+ self.equate(true, TypeTrace::dummy(self.tcx), a, b)
}).map(|_| ())
}
self.resolve_type_vars_or_error(&ty)
}
- pub fn tables_are_tcx_tables(&self) -> bool {
- let tables: &RefCell<ty::Tables> = &self.tables;
- let tcx_tables: &RefCell<ty::Tables> = &self.tcx.tables;
- tables as *const _ == tcx_tables as *const _
- }
-
pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool {
let ty = self.resolve_type_vars_if_possible(&ty);
- if ty.needs_infer() ||
- (ty.has_closure_types() && !self.tables_are_tcx_tables()) {
- // this can get called from typeck (by euv), and moves_by_default
- // rightly refuses to work with inference variables, but
- // moves_by_default has a cache, which we want to use in other
- // cases.
- !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span)
- } else {
- ty.moves_by_default(&self.parameter_environment, span)
+ if let Some(ty) = self.tcx.lift_to_global(&ty) {
+ // Even if the type may have no inference variables, during
+ // type-checking closure types are in local tables only.
+ let local_closures = match self.tables {
+ InferTables::Local(_) => ty.has_closure_types(),
+ InferTables::Global(_) => false
+ };
+ if !local_closures {
+ return ty.moves_by_default(self.tcx.global_tcx(), self.param_env(), span);
+ }
}
+
+ // this can get called from typeck (by euv), and moves_by_default
+ // rightly refuses to work with inference variables, but
+ // moves_by_default has a cache, which we want to use in other
+ // cases.
+ !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span)
}
pub fn node_method_ty(&self, method_call: ty::MethodCall)
self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
}
- pub fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> {
+ pub fn param_env(&self) -> &ty::ParameterEnvironment<'gcx> {
&self.parameter_environment
}
// during trans, we see closure ids from other traits.
// That may require loading the closure data out of the
// cstore.
- Some(ty::Tables::closure_kind(&self.tables, self.tcx, def_id))
+ Some(self.tcx.closure_kind(def_id))
}
}
pub fn closure_type(&self,
def_id: DefId,
- substs: &ty::ClosureSubsts<'tcx>)
+ substs: ty::ClosureSubsts<'tcx>)
-> ty::ClosureTy<'tcx>
{
- let closure_ty =
- ty::Tables::closure_type(self.tables,
- self.tcx,
- def_id,
- substs);
+ if let InferTables::Local(tables) = self.tables {
+ if let Some(ty) = tables.borrow().closure_tys.get(&def_id) {
+ return ty.subst(self.tcx, substs.func_substs);
+ }
+ }
+ let closure_ty = self.tcx.closure_type(def_id, substs);
if self.normalize {
- normalize_associated_type(&self.tcx, &closure_ty)
+ let closure_ty = self.tcx.erase_regions(&closure_ty);
+
+ if !closure_ty.has_projection_types() {
+ return closure_ty;
+ }
+
+ self.normalize_projections_in(&closure_ty)
} else {
closure_ty
}
}
}
-impl<'tcx> TypeTrace<'tcx> {
+impl<'a, 'gcx, 'tcx> TypeTrace<'tcx> {
pub fn span(&self) -> Span {
self.origin.span()
}
-> TypeTrace<'tcx> {
TypeTrace {
origin: origin,
- values: Types(expected_found(a_is_expected, a, b))
+ values: Types(ExpectedFound::new(a_is_expected, a, b))
}
}
- pub fn dummy(tcx: &TyCtxt<'tcx>) -> TypeTrace<'tcx> {
+ pub fn dummy(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> TypeTrace<'tcx> {
TypeTrace {
origin: TypeOrigin::Misc(codemap::DUMMY_SP),
values: Types(ExpectedFound {
");
}
-pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a, 'tcx>,
- subject_node: ast::NodeId) {
+pub fn maybe_print_constraints_for<'a, 'gcx, 'tcx>(
+ region_vars: &RegionVarBindings<'a, 'gcx, 'tcx>,
+ subject_node: ast::NodeId)
+{
let tcx = region_vars.tcx;
if !region_vars.tcx.sess.opts.debugging_opts.print_region_graph {
}
}
-struct ConstraintGraph<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+struct ConstraintGraph<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
graph_name: String,
map: &'a FnvHashMap<Constraint, SubregionOrigin<'tcx>>,
node_ids: FnvHashMap<Node, usize>,
EnclScope(CodeExtent, CodeExtent),
}
-impl<'a, 'tcx> ConstraintGraph<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>,
+impl<'a, 'gcx, 'tcx> ConstraintGraph<'a, 'gcx, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
name: String,
map: &'a ConstraintMap<'tcx>)
- -> ConstraintGraph<'a, 'tcx> {
+ -> ConstraintGraph<'a, 'gcx, 'tcx> {
let mut i = 0;
let mut node_ids = FnvHashMap();
{
}
}
-impl<'a, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'gcx, 'tcx> {
type Node = Node;
type Edge = Edge;
fn graph_id(&self) -> dot::Id {
}
}
-impl<'a, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> {
type Node = Node;
type Edge = Edge;
fn nodes(&self) -> dot::Nodes<Node> {
pub type ConstraintMap<'tcx> = FnvHashMap<Constraint, SubregionOrigin<'tcx>>;
-fn dump_region_constraints_to<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
- map: &ConstraintMap<'tcx>,
- path: &str)
- -> io::Result<()> {
+fn dump_region_constraints_to<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ map: &ConstraintMap<'tcx>,
+ path: &str)
+ -> io::Result<()> {
debug!("dump_region_constraints map (len: {}) path: {}",
map.len(),
path);
pub type CombineMap = FnvHashMap<TwoRegions, RegionVid>;
-pub struct RegionVarBindings<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
var_origins: RefCell<Vec<RegionVariableOrigin>>,
// Constraints of the form `A <= B` introduced by the region
skolemization_count: u32,
}
-impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
- pub fn new(tcx: &'a TyCtxt<'tcx>) -> RegionVarBindings<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> RegionVarBindings<'a, 'gcx, 'tcx> {
RegionVarBindings {
tcx: tcx,
var_origins: RefCell::new(Vec::new()),
origin: SubregionOrigin<'tcx>,
mut relate: F)
-> Region
- where F: FnMut(&RegionVarBindings<'a, 'tcx>, Region, Region)
+ where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, Region, Region)
{
let vars = TwoRegions { a: a, b: b };
match self.combine_map(t).borrow().get(&vars) {
type RegionGraph = graph::Graph<(), Constraint>;
-impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> {
fn infer_variable_values(&self,
free_regions: &FreeRegionMap,
errors: &mut Vec<RegionResolutionError<'tcx>>,
let WalkState {result, dup_found, ..} = state;
return (result, dup_found);
- fn process_edges<'a, 'tcx>(this: &RegionVarBindings<'a, 'tcx>,
- state: &mut WalkState<'tcx>,
- graph: &RegionGraph,
- source_vid: RegionVid,
- dir: Direction) {
+ fn process_edges<'a, 'gcx, 'tcx>(this: &RegionVarBindings<'a, 'gcx, 'tcx>,
+ state: &mut WalkState<'tcx>,
+ graph: &RegionGraph,
+ source_vid: RegionVid,
+ dir: Direction) {
debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
let source_node_index = NodeIndex(source_vid.index as usize);
}
}
-impl<'tcx> GenericKind<'tcx> {
- pub fn to_ty(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+impl<'a, 'gcx, 'tcx> GenericKind<'tcx> {
+ pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match *self {
GenericKind::Param(ref p) => p.to_ty(tcx),
GenericKind::Projection(ref p) => tcx.mk_projection(p.trait_ref.clone(), p.item_name),
}
}
-impl VerifyBound {
+impl<'a, 'gcx, 'tcx> VerifyBound {
fn for_each_region(&self, f: &mut FnMut(ty::Region)) {
match self {
&VerifyBound::AnyRegion(ref rs) |
}
}
- fn is_met<'tcx>(&self,
- tcx: &TyCtxt<'tcx>,
- free_regions: &FreeRegionMap,
- var_values: &Vec<VarValue>,
- min: ty::Region)
- -> bool {
+ fn is_met(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ free_regions: &FreeRegionMap,
+ var_values: &Vec<VarValue>,
+ min: ty::Region)
+ -> bool {
match self {
&VerifyBound::AnyRegion(ref rs) =>
rs.iter()
use super::{InferCtxt, FixupError, FixupResult};
use ty::{self, Ty, TyCtxt, TypeFoldable};
+use ty::fold::TypeFolder;
///////////////////////////////////////////////////////////////////////////
// OPPORTUNISTIC TYPE RESOLVER
/// been unified with (similar to `shallow_resolve`, but deep). This is
/// useful for printing messages etc but also required at various
/// points for correctness.
-pub struct OpportunisticTypeResolver<'a, 'tcx:'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct OpportunisticTypeResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
}
-impl<'a, 'tcx> OpportunisticTypeResolver<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> OpportunisticTypeResolver<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> OpportunisticTypeResolver<'a, 'gcx, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self {
OpportunisticTypeResolver { infcx: infcx }
}
}
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeResolver<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeResolver<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
self.infcx.tcx
}
/// The opportunistic type and region resolver is similar to the
/// opportunistic type resolver, but also opportunistly resolves
/// regions. It is useful for canonicalization.
-pub struct OpportunisticTypeAndRegionResolver<'a, 'tcx:'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct OpportunisticTypeAndRegionResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
}
-impl<'a, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+impl<'a, 'gcx, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'gcx, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self {
OpportunisticTypeAndRegionResolver { infcx: infcx }
}
}
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeAndRegionResolver<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeAndRegionResolver<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
self.infcx.tcx
}
/// Full type resolution replaces all type and region variables with
/// their concrete results. If any variable cannot be replaced (never unified, etc)
/// then an `Err` result is returned.
-pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> FixupResult<T>
+pub fn fully_resolve<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ value: &T) -> FixupResult<T>
where T : TypeFoldable<'tcx>
{
let mut full_resolver = FullTypeResolver { infcx: infcx, err: None };
// N.B. This type is not public because the protocol around checking the
// `err` field is not enforcable otherwise.
-struct FullTypeResolver<'a, 'tcx:'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+struct FullTypeResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
err: Option<FixupError>,
}
-impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> {
self.infcx.tcx
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::combine::{self, CombineFields};
-use super::higher_ranked::HigherRankedRelations;
+use super::combine::CombineFields;
use super::SubregionOrigin;
use super::type_variable::{SubtypeOf, SupertypeOf};
use std::mem;
/// Ensures `a` is made a subtype of `b`. Returns `a` on success.
-pub struct Sub<'a, 'tcx: 'a> {
- fields: CombineFields<'a, 'tcx>,
+pub struct Sub<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fields: CombineFields<'a, 'gcx, 'tcx>,
}
-impl<'a, 'tcx> Sub<'a, 'tcx> {
- pub fn new(f: CombineFields<'a, 'tcx>) -> Sub<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Sub<'a, 'gcx, 'tcx> {
+ pub fn new(f: CombineFields<'a, 'gcx, 'tcx>) -> Sub<'a, 'gcx, 'tcx> {
Sub { fields: f }
}
}
}
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Sub<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Sub<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Sub" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.fields.infcx.tcx }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.fields.infcx.tcx }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn with_cause<F,R>(&mut self, cause: Cause, f: F) -> R
r
}
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ variance: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
match variance {
ty::Invariant => self.fields.equate().relate(a, b),
}
_ => {
- combine::super_combine_tys(self.fields.infcx, self, a, b)?;
+ self.fields.infcx.super_combine_tys(self, a, b)?;
Ok(a)
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a,'tcx>
+ where T: Relate<'tcx>
{
self.fields.higher_ranked_sub(a, b)
}
use ty::{self, IntVarValue, Ty, TyCtxt};
use rustc_data_structures::unify::{Combine, UnifyKey};
-pub trait ToType<'tcx> {
- fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx>;
+pub trait ToType {
+ fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>;
}
impl UnifyKey for ty::IntVid {
fn tag(_: Option<ty::RegionVid>) -> &'static str { "RegionVid" }
}
-impl<'tcx> ToType<'tcx> for IntVarValue {
- fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+impl ToType for IntVarValue {
+ fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match *self {
ty::IntType(i) => tcx.mk_mach_int(i),
ty::UintType(i) => tcx.mk_mach_uint(i),
fn tag(_: Option<ty::FloatVid>) -> &'static str { "FloatVid" }
}
-impl<'tcx> ToType<'tcx> for ast::FloatTy {
- fn to_type(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+impl ToType for ast::FloatTy {
+ fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
tcx.mk_mach_float(*self)
}
}
/// Context for lint checking after type checking.
pub struct LateContext<'a, 'tcx: 'a> {
/// Type context we're checking in.
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// The crate being checked.
pub krate: &'a hir::Crate,
}
impl<'a, 'tcx> LateContext<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>,
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
krate: &'a hir::Crate,
access_levels: &'a AccessLevels) -> LateContext<'a, 'tcx> {
// We want to own the lint store, so move it out of the session.
/// items in the context of the outer item, so enable
/// deep-walking.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, it: &hir::Item) {
/// Perform lint checking on a crate.
///
/// Consumes the `lint_store` field of the `Session`.
-pub fn check_crate(tcx: &TyCtxt, access_levels: &AccessLevels) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ access_levels: &AccessLevels) {
let _task = tcx.dep_graph.in_task(DepNode::LateLintCheck);
let krate = tcx.map.krate();
use syntax::codemap::Span;
use hir as ast;
-pub fn prohibit_type_params(tcx: &TyCtxt, segments: &[ast::PathSegment]) {
- for segment in segments {
- for typ in segment.parameters.types() {
- span_err!(tcx.sess, typ.span, E0109,
- "type parameters are not allowed on this type");
- break;
- }
- for lifetime in segment.parameters.lifetimes() {
- span_err!(tcx.sess, lifetime.span, E0110,
- "lifetime parameters are not allowed on this type");
- break;
- }
- for binding in segment.parameters.bindings() {
- prohibit_projection(tcx, binding.span);
- break;
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn prohibit_type_params(self, segments: &[ast::PathSegment]) {
+ for segment in segments {
+ for typ in segment.parameters.types() {
+ span_err!(self.sess, typ.span, E0109,
+ "type parameters are not allowed on this type");
+ break;
+ }
+ for lifetime in segment.parameters.lifetimes() {
+ span_err!(self.sess, lifetime.span, E0110,
+ "lifetime parameters are not allowed on this type");
+ break;
+ }
+ for binding in segment.parameters.bindings() {
+ self.prohibit_projection(binding.span);
+ break;
+ }
}
}
-}
-pub fn prohibit_projection(tcx: &TyCtxt, span: Span)
-{
- span_err!(tcx.sess, span, E0229,
- "associated type bindings are not allowed here");
-}
+ pub fn prohibit_projection(self, span: Span)
+ {
+ span_err!(self.sess, span, E0229,
+ "associated type bindings are not allowed here");
+ }
-pub fn prim_ty_to_ty<'tcx>(tcx: &TyCtxt<'tcx>,
- segments: &[ast::PathSegment],
- nty: ast::PrimTy)
- -> Ty<'tcx> {
- prohibit_type_params(tcx, segments);
- match nty {
- ast::TyBool => tcx.types.bool,
- ast::TyChar => tcx.types.char,
- ast::TyInt(it) => tcx.mk_mach_int(it),
- ast::TyUint(uit) => tcx.mk_mach_uint(uit),
- ast::TyFloat(ft) => tcx.mk_mach_float(ft),
- ast::TyStr => tcx.mk_str()
+ pub fn prim_ty_to_ty(self,
+ segments: &[ast::PathSegment],
+ nty: ast::PrimTy)
+ -> Ty<'tcx> {
+ self.prohibit_type_params(segments);
+ match nty {
+ ast::TyBool => self.types.bool,
+ ast::TyChar => self.types.char,
+ ast::TyInt(it) => self.mk_mach_int(it),
+ ast::TyUint(uit) => self.mk_mach_uint(uit),
+ ast::TyFloat(ft) => self.mk_mach_float(ft),
+ ast::TyStr => self.mk_str()
+ }
}
-}
-/// If a type in the AST is a primitive type, return the ty::Ty corresponding
-/// to it.
-pub fn ast_ty_to_prim_ty<'tcx>(tcx: &TyCtxt<'tcx>, ast_ty: &ast::Ty)
- -> Option<Ty<'tcx>> {
- if let ast::TyPath(None, ref path) = ast_ty.node {
- let def = match tcx.def_map.borrow().get(&ast_ty.id) {
- None => {
- span_bug!(ast_ty.span, "unbound path {:?}", path)
+ /// If a type in the AST is a primitive type, return the ty::Ty corresponding
+ /// to it.
+ pub fn ast_ty_to_prim_ty(self, ast_ty: &ast::Ty) -> Option<Ty<'tcx>> {
+ if let ast::TyPath(None, ref path) = ast_ty.node {
+ let def = match self.def_map.borrow().get(&ast_ty.id) {
+ None => {
+ span_bug!(ast_ty.span, "unbound path {:?}", path)
+ }
+ Some(d) => d.full_def()
+ };
+ if let Def::PrimTy(nty) = def {
+ Some(self.prim_ty_to_ty(&path.segments, nty))
+ } else {
+ None
}
- Some(d) => d.full_def()
- };
- if let Def::PrimTy(nty) = def {
- Some(prim_ty_to_ty(tcx, &path.segments, nty))
} else {
None
}
- } else {
- None
}
}
use mir::repr::Mir;
use mir::mir_map::MirMap;
use session::Session;
+use session::config::PanicStrategy;
use session::search_paths::PathKind;
use util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
use std::any::Any;
fn stability(&self, def: DefId) -> Option<attr::Stability>;
fn deprecation(&self, def: DefId) -> Option<attr::Deprecation>;
fn visibility(&self, def: DefId) -> ty::Visibility;
- fn closure_kind(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> ty::ClosureKind;
- fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> ty::ClosureTy<'tcx>;
+ fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind;
+ fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
+ -> ty::ClosureTy<'tcx>;
fn item_variances(&self, def: DefId) -> ty::ItemVariances;
fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr>;
- fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::TypeScheme<'tcx>;
+ fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::TypeScheme<'tcx>;
fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>>;
fn item_name(&self, def: DefId) -> ast::Name;
- fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx>;
- fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx>;
+ fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx>;
+ fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx>;
fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>;
fn item_symbol(&self, def: DefId) -> String;
- fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>;
- fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>;
+ fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef<'tcx>;
+ fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>;
fn method_arg_names(&self, did: DefId) -> Vec<String>;
fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec<DefId>;
// trait info
fn implementations_of_trait(&self, def_id: DefId) -> Vec<DefId>;
- fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::Method<'tcx>>>;
+ fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::Method<'tcx>>>;
fn trait_item_def_ids(&self, def: DefId)
-> Vec<ty::ImplOrTraitItemId>;
// impl info
fn impl_items(&self, impl_def_id: DefId) -> Vec<ty::ImplOrTraitItemId>;
- fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::TraitRef<'tcx>>;
+ fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::TraitRef<'tcx>>;
fn impl_polarity(&self, def: DefId) -> Option<hir::ImplPolarity>;
fn custom_coerce_unsized_kind(&self, def: DefId)
-> Option<ty::adjustment::CustomCoerceUnsized>;
- fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::AssociatedConst<'tcx>>>;
+ fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::AssociatedConst<'tcx>>>;
fn impl_parent(&self, impl_def_id: DefId) -> Option<DefId>;
// trait/impl-item info
- fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> Option<DefId>;
- fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::ImplOrTraitItem<'tcx>>;
+ fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
+ -> Option<DefId>;
+ fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::ImplOrTraitItem<'tcx>>;
// flags
fn is_const_fn(&self, did: DefId) -> bool;
fn is_defaulted_trait(&self, did: DefId) -> bool;
fn is_impl(&self, did: DefId) -> bool;
fn is_default_impl(&self, impl_did: DefId) -> bool;
- fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool;
+ fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool;
fn is_static_method(&self, did: DefId) -> bool;
fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool;
fn is_typedef(&self, did: DefId) -> bool;
fn is_staged_api(&self, cnum: ast::CrateNum) -> bool;
fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool;
fn is_allocator(&self, cnum: ast::CrateNum) -> bool;
+ fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool;
+ fn panic_strategy(&self, cnum: ast::CrateNum) -> PanicStrategy;
fn extern_crate(&self, cnum: ast::CrateNum) -> Option<ExternCrate>;
fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec<ast::Attribute>;
/// The name of the crate as it is referred to in source code of the current
fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec<ChildItem>;
// misc. metadata
- fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> FoundAst<'tcx>;
- fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<Mir<'tcx>>;
+ fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> FoundAst<'tcx>;
+ fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<Mir<'tcx>>;
fn is_item_mir_available(&self, def: DefId) -> bool;
// This is basically a 1-based range of ints, which is a little
// utility functions
fn metadata_filename(&self) -> &str;
fn metadata_section_name(&self, target: &Target) -> &str;
- fn encode_type(&self,
- tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
- -> Vec<u8>;
+ fn encode_type<'a>(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String)
+ -> Vec<u8>;
fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option<PathBuf>)>;
fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource;
fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<ast::CrateNum>;
- fn encode_metadata(&self,
- tcx: &TyCtxt<'tcx>,
- reexports: &def::ExportMap,
- item_symbols: &RefCell<NodeMap<String>>,
- link_meta: &LinkMeta,
- reachable: &NodeSet,
- mir_map: &MirMap<'tcx>,
- krate: &hir::Crate) -> Vec<u8>;
+ fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ reexports: &def::ExportMap,
+ item_symbols: &RefCell<NodeMap<String>>,
+ link_meta: &LinkMeta,
+ reachable: &NodeSet,
+ mir_map: &MirMap<'tcx>,
+ krate: &hir::Crate) -> Vec<u8>;
fn metadata_encoding_version(&self) -> &[u8];
}
fn stability(&self, def: DefId) -> Option<attr::Stability> { bug!("stability") }
fn deprecation(&self, def: DefId) -> Option<attr::Deprecation> { bug!("deprecation") }
fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") }
- fn closure_kind(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> ty::ClosureKind { bug!("closure_kind") }
- fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> ty::ClosureTy<'tcx> { bug!("closure_ty") }
+ fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { bug!("closure_kind") }
+ fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
+ -> ty::ClosureTy<'tcx> { bug!("closure_ty") }
fn item_variances(&self, def: DefId) -> ty::ItemVariances { bug!("item_variances") }
fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr> { bug!("repr_attrs") }
- fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::TypeScheme<'tcx> { bug!("item_type") }
+ fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::TypeScheme<'tcx> { bug!("item_type") }
fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>> {
bug!("visible_parent_map")
}
fn item_name(&self, def: DefId) -> ast::Name { bug!("item_name") }
- fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx> { bug!("item_predicates") }
- fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx> { bug!("item_super_predicates") }
+ fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx> { bug!("item_predicates") }
+ fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx> { bug!("item_super_predicates") }
fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute> { bug!("item_attrs") }
fn item_symbol(&self, def: DefId) -> String { bug!("item_symbol") }
- fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>
+ fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef<'tcx>
{ bug!("trait_def") }
- fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
+ fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
{ bug!("adt_def") }
fn method_arg_names(&self, did: DefId) -> Vec<String> { bug!("method_arg_names") }
fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec<DefId> { vec![] }
// trait info
fn implementations_of_trait(&self, def_id: DefId) -> Vec<DefId> { vec![] }
- fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::Method<'tcx>>> { bug!("provided_trait_methods") }
+ fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::Method<'tcx>>> { bug!("provided_trait_methods") }
fn trait_item_def_ids(&self, def: DefId)
-> Vec<ty::ImplOrTraitItemId> { bug!("trait_item_def_ids") }
// impl info
fn impl_items(&self, impl_def_id: DefId) -> Vec<ty::ImplOrTraitItemId>
{ bug!("impl_items") }
- fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::TraitRef<'tcx>> { bug!("impl_trait_ref") }
+ fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::TraitRef<'tcx>> { bug!("impl_trait_ref") }
fn impl_polarity(&self, def: DefId) -> Option<hir::ImplPolarity> { bug!("impl_polarity") }
fn custom_coerce_unsized_kind(&self, def: DefId)
-> Option<ty::adjustment::CustomCoerceUnsized>
{ bug!("custom_coerce_unsized_kind") }
- fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::AssociatedConst<'tcx>>> { bug!("associated_consts") }
+ fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::AssociatedConst<'tcx>>> { bug!("associated_consts") }
fn impl_parent(&self, def: DefId) -> Option<DefId> { bug!("impl_parent") }
// trait/impl-item info
- fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> Option<DefId> { bug!("trait_of_item") }
- fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::ImplOrTraitItem<'tcx>> { bug!("impl_or_trait_item") }
+ fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
+ -> Option<DefId> { bug!("trait_of_item") }
+ fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::ImplOrTraitItem<'tcx>> { bug!("impl_or_trait_item") }
// flags
fn is_const_fn(&self, did: DefId) -> bool { bug!("is_const_fn") }
fn is_defaulted_trait(&self, did: DefId) -> bool { bug!("is_defaulted_trait") }
fn is_impl(&self, did: DefId) -> bool { bug!("is_impl") }
fn is_default_impl(&self, impl_did: DefId) -> bool { bug!("is_default_impl") }
- fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool { bug!("is_extern_item") }
+ fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool
+ { bug!("is_extern_item") }
fn is_static_method(&self, did: DefId) -> bool { bug!("is_static_method") }
fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool { false }
fn is_typedef(&self, did: DefId) -> bool { bug!("is_typedef") }
fn is_staged_api(&self, cnum: ast::CrateNum) -> bool { bug!("is_staged_api") }
fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool { bug!("is_explicitly_linked") }
fn is_allocator(&self, cnum: ast::CrateNum) -> bool { bug!("is_allocator") }
+ fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool { bug!("is_panic_runtime") }
+ fn panic_strategy(&self, cnum: ast::CrateNum) -> PanicStrategy {
+ bug!("panic_strategy")
+ }
fn extern_crate(&self, cnum: ast::CrateNum) -> Option<ExternCrate> { bug!("extern_crate") }
fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec<ast::Attribute>
{ bug!("crate_attrs") }
{ bug!("crate_top_level_items") }
// misc. metadata
- fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> FoundAst<'tcx> { bug!("maybe_get_item_ast") }
- fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<Mir<'tcx>> { bug!("maybe_get_item_mir") }
+ fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> FoundAst<'tcx> { bug!("maybe_get_item_ast") }
+ fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<Mir<'tcx>> { bug!("maybe_get_item_mir") }
fn is_item_mir_available(&self, def: DefId) -> bool {
bug!("is_item_mir_available")
}
// utility functions
fn metadata_filename(&self) -> &str { bug!("metadata_filename") }
fn metadata_section_name(&self, target: &Target) -> &str { bug!("metadata_section_name") }
- fn encode_type(&self,
- tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
- -> Vec<u8> {
+ fn encode_type<'a>(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String)
+ -> Vec<u8> {
bug!("encode_type")
}
fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option<PathBuf>)>
{ vec![] }
fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource { bug!("used_crate_source") }
fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<ast::CrateNum> { None }
- fn encode_metadata(&self,
- tcx: &TyCtxt<'tcx>,
- reexports: &def::ExportMap,
- item_symbols: &RefCell<NodeMap<String>>,
- link_meta: &LinkMeta,
- reachable: &NodeSet,
- mir_map: &MirMap<'tcx>,
- krate: &hir::Crate) -> Vec<u8> { vec![] }
+ fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ reexports: &def::ExportMap,
+ item_symbols: &RefCell<NodeMap<String>>,
+ link_meta: &LinkMeta,
+ reachable: &NodeSet,
+ mir_map: &MirMap<'tcx>,
+ krate: &hir::Crate) -> Vec<u8> { vec![] }
fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") }
}
use hir::def_id::DefId;
pub trait EncodingContext<'tcx> {
- fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: Ty<'tcx>);
fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>);
}
}
pub trait DecodingContext<'tcx> {
- fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx>;
fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx>;
fn translate_def_id(&self, def_id: DefId) -> DefId;
#[derive(Clone)]
pub struct DataFlowContext<'a, 'tcx: 'a, O> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// a name for the analysis using this dataflow instance
analysis_name: &'static str,
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
- pub fn new(tcx: &'a TyCtxt<'tcx>,
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
analysis_name: &'static str,
decl: Option<&hir::FnDecl>,
cfg: &cfg::CFG,
// explored. For example, if it's a live NodeItem that is a
// function, then we should explore its block to check for codes that
// may need to be marked as live.
-fn should_explore(tcx: &TyCtxt, node_id: ast::NodeId) -> bool {
+fn should_explore<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ node_id: ast::NodeId) -> bool {
match tcx.map.find(node_id) {
Some(ast_map::NodeItem(..)) |
Some(ast_map::NodeImplItem(..)) |
struct MarkSymbolVisitor<'a, 'tcx: 'a> {
worklist: Vec<ast::NodeId>,
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
struct_has_extern_repr: bool,
ignore_non_const_paths: bool,
}
impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>,
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a, 'tcx> {
MarkSymbolVisitor {
worklist: worklist,
}
}
-fn create_and_seed_worklist(tcx: &TyCtxt,
- access_levels: &privacy::AccessLevels,
- krate: &hir::Crate) -> Vec<ast::NodeId> {
+fn create_and_seed_worklist<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ access_levels: &privacy::AccessLevels,
+ krate: &hir::Crate)
+ -> Vec<ast::NodeId> {
let mut worklist = Vec::new();
for (id, _) in &access_levels.map {
worklist.push(*id);
return life_seeder.worklist;
}
-fn find_live(tcx: &TyCtxt,
- access_levels: &privacy::AccessLevels,
- krate: &hir::Crate)
- -> Box<HashSet<ast::NodeId>> {
+fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ access_levels: &privacy::AccessLevels,
+ krate: &hir::Crate)
+ -> Box<HashSet<ast::NodeId>> {
let worklist = create_and_seed_worklist(tcx, access_levels, krate);
let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist);
symbol_visitor.mark_live_symbols();
}
struct DeadVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
/// an error. We could do this also by checking the parents, but
/// this is how the code is setup and it seems harmless enough.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, item: &hir::Item) {
}
}
-pub fn check_crate(tcx: &TyCtxt, access_levels: &privacy::AccessLevels) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ access_levels: &privacy::AccessLevels) {
let _task = tcx.dep_graph.in_task(DepNode::DeadCheck);
let krate = tcx.map.krate();
let live_symbols = find_live(tcx, access_levels, krate);
use syntax::ast;
use session;
-use session::config;
+use session::config::{self, PanicStrategy};
use middle::cstore::LinkagePreference::{self, RequireStatic, RequireDynamic};
use util::nodemap::FnvHashMap;
}
// We've gotten this far because we're emitting some form of a final
- // artifact which means that we're going to need an allocator of some form.
- // No allocator may have been required or linked so far, so activate one
- // here if one isn't set.
- activate_allocator(sess, &mut ret);
+ // artifact which means that we may need to inject dependencies of some
+ // form.
+ //
+ // Things like allocators and panic runtimes may not have been activated
+ // quite yet, so do so here.
+ activate_injected_dep(sess.injected_allocator.get(), &mut ret,
+ &|cnum| sess.cstore.is_allocator(cnum));
+ activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret,
+ &|cnum| sess.cstore.is_panic_runtime(cnum));
// When dylib B links to dylib A, then when using B we must also link to A.
// It could be the case, however, that the rlib for A is present (hence we
}
}).collect::<Vec<_>>();
- // Our allocator may not have been activated as it's not flagged with
- // explicitly_linked, so flag it here if necessary.
- activate_allocator(sess, &mut ret);
+ // Our allocator/panic runtime may not have been linked above if it wasn't
+ // explicitly linked, which is the case for any injected dependency. Handle
+ // that here and activate them.
+ activate_injected_dep(sess.injected_allocator.get(), &mut ret,
+ &|cnum| sess.cstore.is_allocator(cnum));
+ activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret,
+ &|cnum| sess.cstore.is_panic_runtime(cnum));
Some(ret)
}
// Given a list of how to link upstream dependencies so far, ensure that an
-// allocator is activated. This will not do anything if one was transitively
-// included already (e.g. via a dylib or explicitly so).
+// injected dependency is activated. This will not do anything if one was
+// transitively included already (e.g. via a dylib or explicitly so).
//
-// If an allocator was not found then we're guaranteed the metadata::creader
-// module has injected an allocator dependency (not listed as a required
-// dependency) in the session's `injected_allocator` field. If this field is not
-// set then this compilation doesn't actually need an allocator and we can also
-// skip this step entirely.
-fn activate_allocator(sess: &session::Session, list: &mut DependencyList) {
- let mut allocator_found = false;
+// If an injected dependency was not found then we're guaranteed the
+// metadata::creader module has injected that dependency (not listed as
+// a required dependency) in one of the session's fields. If the relevant
+// set then this compilation doesn't actually need the dependency and we can
+// also skip this step entirely.
+fn activate_injected_dep(injected: Option<ast::CrateNum>,
+ list: &mut DependencyList,
+ replaces_injected: &Fn(ast::CrateNum) -> bool) {
for (i, slot) in list.iter().enumerate() {
let cnum = (i + 1) as ast::CrateNum;
- if !sess.cstore.is_allocator(cnum) {
+ if !replaces_injected(cnum) {
continue
}
- if let Linkage::NotLinked = *slot {
- continue
+ if *slot != Linkage::NotLinked {
+ return
}
- allocator_found = true;
}
- if !allocator_found {
- if let Some(injected_allocator) = sess.injected_allocator.get() {
- let idx = injected_allocator as usize - 1;
- assert_eq!(list[idx], Linkage::NotLinked);
- list[idx] = Linkage::Static;
- }
+ if let Some(injected) = injected {
+ let idx = injected as usize - 1;
+ assert_eq!(list[idx], Linkage::NotLinked);
+ list[idx] = Linkage::Static;
}
}
return
}
let mut allocator = None;
+ let mut panic_runtime = None;
for (i, linkage) in list.iter().enumerate() {
- let cnum = (i + 1) as ast::CrateNum;
- if !sess.cstore.is_allocator(cnum) {
- continue
- }
if let Linkage::NotLinked = *linkage {
continue
}
- if let Some(prev_alloc) = allocator {
- let prev_name = sess.cstore.crate_name(prev_alloc);
- let cur_name = sess.cstore.crate_name(cnum);
- sess.err(&format!("cannot link together two \
- allocators: {} and {}",
- prev_name, cur_name));
+ let cnum = (i + 1) as ast::CrateNum;
+ if sess.cstore.is_allocator(cnum) {
+ if let Some(prev) = allocator {
+ let prev_name = sess.cstore.crate_name(prev);
+ let cur_name = sess.cstore.crate_name(cnum);
+ sess.err(&format!("cannot link together two \
+ allocators: {} and {}",
+ prev_name, cur_name));
+ }
+ allocator = Some(cnum);
+ }
+
+ if sess.cstore.is_panic_runtime(cnum) {
+ if let Some((prev, _)) = panic_runtime {
+ let prev_name = sess.cstore.crate_name(prev);
+ let cur_name = sess.cstore.crate_name(cnum);
+ sess.err(&format!("cannot link together two \
+ panic runtimes: {} and {}",
+ prev_name, cur_name));
+ }
+ panic_runtime = Some((cnum, sess.cstore.panic_strategy(cnum)));
+ }
+ }
+
+ // If we found a panic runtime, then we know by this point that it's the
+ // only one, but we perform validation here that all the panic strategy
+ // compilation modes for the whole DAG are valid.
+ if let Some((cnum, found_strategy)) = panic_runtime {
+ let desired_strategy = sess.opts.cg.panic.clone();
+
+ // First up, validate that our selected panic runtime is indeed exactly
+ // our same strategy.
+ if found_strategy != desired_strategy {
+ sess.err(&format!("the linked panic runtime `{}` is \
+ not compiled with this crate's \
+ panic strategy `{}`",
+ sess.cstore.crate_name(cnum),
+ desired_strategy.desc()));
+ }
+
+ // Next up, verify that all other crates are compatible with this panic
+ // strategy. If the dep isn't linked, we ignore it, and if our strategy
+ // is abort then it's compatible with everything. Otherwise all crates'
+ // panic strategy must match our own.
+ for (i, linkage) in list.iter().enumerate() {
+ if let Linkage::NotLinked = *linkage {
+ continue
+ }
+ if desired_strategy == PanicStrategy::Abort {
+ continue
+ }
+ let cnum = (i + 1) as ast::CrateNum;
+ let found_strategy = sess.cstore.panic_strategy(cnum);
+ if desired_strategy == found_strategy {
+ continue
+ }
+
+ sess.err(&format!("the crate `{}` is compiled with the \
+ panic strategy `{}` which is \
+ incompatible with this crate's \
+ strategy of `{}`",
+ sess.cstore.crate_name(cnum),
+ found_strategy.desc(),
+ desired_strategy.desc()));
}
- allocator = Some(cnum);
}
}
}
struct EffectCheckVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// Whether we're in an unsafe context.
unsafe_context: UnsafeContext,
}
}
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _task = tcx.dep_graph.in_task(DepNode::EffectCheck);
let mut visitor = EffectCheckVisitor {
use hir::pat_util;
use hir::def::Def;
use hir::def_id::{DefId};
-use infer;
+use infer::InferCtxt;
use middle::mem_categorization as mc;
use ty::{self, TyCtxt, adjustment};
}
impl OverloadedCallType {
- fn from_trait_id(tcx: &TyCtxt, trait_id: DefId)
- -> OverloadedCallType {
+ fn from_trait_id(tcx: TyCtxt, trait_id: DefId) -> OverloadedCallType {
for &(maybe_function_trait, overloaded_call_type) in &[
(tcx.lang_items.fn_once_trait(), FnOnceOverloadedCall),
(tcx.lang_items.fn_mut_trait(), FnMutOverloadedCall),
bug!("overloaded call didn't map to known function trait")
}
- fn from_method_id(tcx: &TyCtxt, method_id: DefId)
- -> OverloadedCallType {
+ fn from_method_id(tcx: TyCtxt, method_id: DefId) -> OverloadedCallType {
let method = tcx.impl_or_trait_item(method_id);
OverloadedCallType::from_trait_id(tcx, method.container().id())
}
// mem_categorization, it requires a TYPER, which is a type that
// supplies types from the tree. After type checking is complete, you
// can just use the tcx as the typer.
-pub struct ExprUseVisitor<'d, 't, 'a: 't, 'tcx:'a+'d> {
- typer: &'t infer::InferCtxt<'a, 'tcx>,
- mc: mc::MemCategorizationContext<'t, 'a, 'tcx>,
- delegate: &'d mut Delegate<'tcx>,
+pub struct ExprUseVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>,
+ delegate: &'a mut Delegate<'tcx>,
}
// If the TYPER results in an error, it's because the type check
ByRef,
}
-impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
- pub fn new(delegate: &'d mut (Delegate<'tcx>+'d),
- typer: &'t infer::InferCtxt<'a, 'tcx>)
- -> ExprUseVisitor<'d,'t,'a,'tcx> where 'tcx:'a+'d
+impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
+ pub fn new(delegate: &'a mut (Delegate<'tcx>+'a),
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self
{
- let mc: mc::MemCategorizationContext<'t, 'a, 'tcx> =
- mc::MemCategorizationContext::new(typer);
- ExprUseVisitor { typer: typer, mc: mc, delegate: delegate }
+ ExprUseVisitor {
+ mc: mc::MemCategorizationContext::new(infcx),
+ delegate: delegate
+ }
}
pub fn walk_fn(&mut self,
decl: &hir::FnDecl,
body: &hir::Block) {
for arg in &decl.inputs {
- let arg_ty = return_if_err!(self.typer.node_ty(arg.pat.id));
+ let arg_ty = return_if_err!(self.mc.infcx.node_ty(arg.pat.id));
let fn_body_scope = self.tcx().region_maps.node_extent(body.id);
let arg_cmt = self.mc.cat_rvalue(
}
}
- fn tcx(&self) -> &'t TyCtxt<'tcx> {
- self.typer.tcx
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
+ self.mc.infcx.tcx
}
fn delegate_consume(&mut self,
debug!("delegate_consume(consume_id={}, cmt={:?})",
consume_id, cmt);
- let mode = copy_or_move(self.typer, &cmt, DirectRefMove);
+ let mode = copy_or_move(self.mc.infcx, &cmt, DirectRefMove);
self.delegate.consume(consume_id, consume_span, cmt, mode);
}
hir::ExprAddrOf(m, ref base) => { // &base
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
- let expr_ty = return_if_err!(self.typer.node_ty(expr.id));
+ let expr_ty = return_if_err!(self.mc.infcx.node_ty(expr.id));
if let ty::TyRef(&r, _) = expr_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
self.borrow_expr(&base, r, bk, AddrOf);
}
fn walk_callee(&mut self, call: &hir::Expr, callee: &hir::Expr) {
- let callee_ty = return_if_err!(self.typer.expr_ty_adjusted(callee));
+ let callee_ty = return_if_err!(self.mc.infcx.expr_ty_adjusted(callee));
debug!("walk_callee: callee={:?} callee_ty={:?}",
callee, callee_ty);
let call_scope = self.tcx().region_maps.node_extent(call.id);
ty::TyError => { }
_ => {
let overloaded_call_type =
- match self.typer.node_method_id(ty::MethodCall::expr(call.id)) {
+ match self.mc.infcx.node_method_id(ty::MethodCall::expr(call.id)) {
Some(method_id) => {
OverloadedCallType::from_method_id(self.tcx(), method_id)
}
match local.init {
None => {
let delegate = &mut self.delegate;
- pat_util::pat_bindings(&self.typer.tcx.def_map, &local.pat,
+ pat_util::pat_bindings(&self.mc.infcx.tcx.def_map, &local.pat,
|_, id, span, _| {
delegate.decl_without_init(id, span);
})
// consumed or borrowed as part of the automatic adjustment
// process.
fn walk_adjustment(&mut self, expr: &hir::Expr) {
- let typer = self.typer;
+ let infcx = self.mc.infcx;
//NOTE(@jroesch): mixed RefCell borrow causes crash
- let adj = typer.adjustments().get(&expr.id).map(|x| x.clone());
+ let adj = infcx.adjustments().get(&expr.id).map(|x| x.clone());
if let Some(adjustment) = adj {
match adjustment {
adjustment::AdjustReifyFnPointer |
for i in 0..autoderefs {
let deref_id = ty::MethodCall::autoderef(expr.id, i as u32);
- match self.typer.node_method_ty(deref_id) {
+ match self.mc.infcx.node_method_ty(deref_id) {
None => {}
Some(method_ty) => {
let cmt = return_if_err!(self.mc.cat_expr_autoderefd(expr, i));
pass_args: PassArgs)
-> bool
{
- if !self.typer.is_method_call(expr.id) {
+ if !self.mc.infcx.is_method_call(expr.id) {
return false;
}
PatKind::Ident(hir::BindByRef(_), _, _) =>
mode.lub(BorrowingMatch),
PatKind::Ident(hir::BindByValue(_), _, _) => {
- match copy_or_move(self.typer, &cmt_pat, PatBindingMove) {
+ match copy_or_move(self.mc.infcx, &cmt_pat, PatBindingMove) {
Copy => mode.lub(CopyingMatch),
Move(_) => mode.lub(MovingMatch),
}
pat);
let mc = &self.mc;
- let typer = self.typer;
+ let infcx = self.mc.infcx;
let def_map = &self.tcx().def_map;
let delegate = &mut self.delegate;
return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
match_mode);
// pat_ty: the type of the binding being produced.
- let pat_ty = return_if_err!(typer.node_ty(pat.id));
+ let pat_ty = return_if_err!(infcx.node_ty(pat.id));
// Each match binding is effectively an assignment to the
// binding being produced.
}
}
PatKind::Ident(hir::BindByValue(_), _, _) => {
- let mode = copy_or_move(typer, &cmt_pat, PatBindingMove);
+ let mode = copy_or_move(infcx, &cmt_pat, PatBindingMove);
debug!("walk_pat binding consuming pat");
delegate.consume_pat(pat, cmt_pat, mode);
}
// the leaves of the pattern tree structure.
return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
let def_map = def_map.borrow();
- let tcx = typer.tcx;
+ let tcx = infcx.tcx;
match pat.node {
PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::QPath(..) |
let id_var = freevar.def.var_id();
let upvar_id = ty::UpvarId { var_id: id_var,
closure_expr_id: closure_expr.id };
- let upvar_capture = self.typer.upvar_capture(upvar_id).unwrap();
+ let upvar_capture = self.mc.infcx.upvar_capture(upvar_id).unwrap();
let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
fn_decl_span,
freevar.def));
match upvar_capture {
ty::UpvarCapture::ByValue => {
- let mode = copy_or_move(self.typer, &cmt_var, CaptureMove);
+ let mode = copy_or_move(self.mc.infcx, &cmt_var, CaptureMove);
self.delegate.consume(closure_expr.id, freevar.span, cmt_var, mode);
}
ty::UpvarCapture::ByRef(upvar_borrow) => {
// Create the cmt for the variable being borrowed, from the
// caller's perspective
let var_id = upvar_def.var_id();
- let var_ty = self.typer.node_ty(var_id)?;
+ let var_ty = self.mc.infcx.node_ty(var_id)?;
self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def)
}
}
-fn copy_or_move<'a, 'tcx>(typer: &infer::InferCtxt<'a, 'tcx>,
- cmt: &mc::cmt<'tcx>,
- move_reason: MoveReason)
- -> ConsumeMode
+fn copy_or_move<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ cmt: &mc::cmt<'tcx>,
+ move_reason: MoveReason)
+ -> ConsumeMode
{
- if typer.type_moves_by_default(cmt.ty, cmt.span) {
+ if infcx.type_moves_by_default(cmt.ty, cmt.span) {
Move(move_reason)
} else {
Copy
}
}
- pub fn relate_free_regions_from_predicates<'tcx>(&mut self,
- _tcx: &TyCtxt<'tcx>,
- predicates: &[ty::Predicate<'tcx>]) {
+ pub fn relate_free_regions_from_predicates(&mut self,
+ predicates: &[ty::Predicate]) {
debug!("relate_free_regions_from_predicates(predicates={:?})", predicates);
for predicate in predicates {
match *predicate {
/// Determines whether one region is a subregion of another. This is intended to run *after
/// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
pub fn is_subregion_of(&self,
- tcx: &TyCtxt,
+ tcx: TyCtxt,
sub_region: ty::Region,
super_region: ty::Region)
-> bool {
use dep_graph::DepNode;
use hir::def::Def;
use hir::def_id::DefId;
-use infer::{InferCtxt, new_infer_ctxt};
+use infer::InferCtxt;
use traits::ProjectionMode;
use ty::{self, Ty, TyCtxt};
use ty::layout::{LayoutError, Pointer, SizeSkeleton};
use hir::intravisit::{self, Visitor, FnKind};
use hir;
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut visitor = ItemVisitor {
tcx: tcx
};
}
struct ItemVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>
+ tcx: TyCtxt<'a, 'tcx, 'tcx>
}
impl<'a, 'tcx> ItemVisitor<'a, 'tcx> {
fn visit_const(&mut self, item_id: ast::NodeId, expr: &hir::Expr) {
let param_env = ty::ParameterEnvironment::for_item(self.tcx, item_id);
- let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
- Some(param_env),
- ProjectionMode::Any);
- let mut visitor = ExprVisitor {
- infcx: &infcx
- };
- visitor.visit_expr(expr);
+ self.tcx.infer_ctxt(None, Some(param_env), ProjectionMode::Any).enter(|infcx| {
+ let mut visitor = ExprVisitor {
+ infcx: &infcx
+ };
+ visitor.visit_expr(expr);
+ });
}
}
-struct ExprVisitor<'a, 'tcx: 'a> {
- infcx: &'a InferCtxt<'a, 'tcx>
+struct ExprVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> ExprVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
let intrinsic = match self.infcx.tcx.lookup_item_type(def_id).ty.sty {
ty::TyFnDef(_, _, ref bfty) => bfty.abi == RustIntrinsic,
intrinsic && self.infcx.tcx.item_name(def_id).as_str() == "transmute"
}
- fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) {
+ fn check_transmute(&self, span: Span, from: Ty<'gcx>, to: Ty<'gcx>, id: ast::NodeId) {
let sk_from = SizeSkeleton::compute(from, self.infcx);
let sk_to = SizeSkeleton::compute(to, self.infcx);
}
// Try to display a sensible error with as much information as possible.
- let skeleton_string = |ty: Ty<'tcx>, sk| {
+ let skeleton_string = |ty: Ty<'gcx>, sk| {
match sk {
Ok(SizeSkeleton::Known(size)) => {
format!("{} bits", size.bits())
impl<'a, 'tcx, 'v> Visitor<'v> for ItemVisitor<'a, 'tcx> {
// const, static and N in [T; N].
fn visit_expr(&mut self, expr: &hir::Expr) {
- let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
- None, ProjectionMode::Any);
- let mut visitor = ExprVisitor {
- infcx: &infcx
- };
- visitor.visit_expr(expr);
+ self.tcx.infer_ctxt(None, None, ProjectionMode::Any).enter(|infcx| {
+ let mut visitor = ExprVisitor {
+ infcx: &infcx
+ };
+ visitor.visit_expr(expr);
+ });
}
fn visit_trait_item(&mut self, item: &hir::TraitItem) {
fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
b: &'v hir::Block, s: Span, id: ast::NodeId) {
- match fk {
- FnKind::ItemFn(..) | FnKind::Method(..) => {
- let param_env = ty::ParameterEnvironment::for_item(self.tcx, id);
- let infcx = new_infer_ctxt(self.tcx, &self.tcx.tables,
- Some(param_env),
- ProjectionMode::Any);
- let mut visitor = ExprVisitor {
- infcx: &infcx
- };
- visitor.visit_fn(fk, fd, b, s, id);
- }
- FnKind::Closure(..) => {
- span_bug!(s, "intrinsicck: closure outside of function")
- }
+ if let FnKind::Closure(..) = fk {
+ span_bug!(s, "intrinsicck: closure outside of function")
}
+ let param_env = ty::ParameterEnvironment::for_item(self.tcx, id);
+ self.tcx.infer_ctxt(None, Some(param_env), ProjectionMode::Any).enter(|infcx| {
+ let mut visitor = ExprVisitor {
+ infcx: &infcx
+ };
+ visitor.visit_fn(fk, fd, b, s, id);
+ });
}
}
-impl<'a, 'tcx, 'v> Visitor<'v> for ExprVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for ExprVisitor<'a, 'gcx, 'tcx> {
fn visit_expr(&mut self, expr: &hir::Expr) {
if let hir::ExprPath(..) = expr.node {
match self.infcx.tcx.resolve_expr(expr) {
use hir::pat_util;
use ty::{self, TyCtxt, ParameterEnvironment};
use traits::{self, ProjectionMode};
-use infer;
use ty::subst::Subst;
use lint;
use util::nodemap::NodeMap;
ExitNode
}
-fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &TyCtxt) -> String {
- let cm = cx.sess.codemap();
+fn live_node_kind_to_string(lnk: LiveNodeKind, tcx: TyCtxt) -> String {
+ let cm = tcx.sess.codemap();
match lnk {
FreeVarNode(s) => {
format!("Free var node [{}]", cm.span_to_string(s))
fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); }
}
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _task = tcx.dep_graph.in_task(DepNode::Liveness);
tcx.map.krate().visit_all_items(&mut IrMaps::new(tcx));
tcx.sess.abort_if_errors();
}
struct IrMaps<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
num_live_nodes: usize,
num_vars: usize,
}
impl<'a, 'tcx> IrMaps<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>) -> IrMaps<'a, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> IrMaps<'a, 'tcx> {
IrMaps {
tcx: tcx,
num_live_nodes: 0,
fn fn_ret(&self, id: NodeId) -> ty::PolyFnOutput<'tcx> {
let fn_ty = self.ir.tcx.node_id_to_type(id);
match fn_ty.sty {
- ty::TyClosure(closure_def_id, ref substs) =>
+ ty::TyClosure(closure_def_id, substs) =>
self.ir.tcx.closure_type(closure_def_id, substs).sig.output(),
_ => fn_ty.fn_ret()
}
ty::FnConverging(t_ret)
if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() => {
- let param_env = ParameterEnvironment::for_item(&self.ir.tcx, id);
- let t_ret_subst = t_ret.subst(&self.ir.tcx, ¶m_env.free_substs);
- let infcx = infer::new_infer_ctxt(&self.ir.tcx,
- &self.ir.tcx.tables,
- Some(param_env),
- ProjectionMode::Any);
- let cause = traits::ObligationCause::dummy();
- let norm = traits::fully_normalize(&infcx,
- cause,
- &t_ret_subst);
-
- if norm.unwrap().is_nil() {
- // for nil return types, it is ok to not return a value expl.
- } else {
+ let param_env = ParameterEnvironment::for_item(self.ir.tcx, id);
+ let t_ret_subst = t_ret.subst(self.ir.tcx, ¶m_env.free_substs);
+ let is_nil = self.ir.tcx.infer_ctxt(None, Some(param_env),
+ ProjectionMode::Any).enter(|infcx| {
+ let cause = traits::ObligationCause::dummy();
+ traits::fully_normalize(&infcx, cause, &t_ret_subst).unwrap().is_nil()
+ });
+
+                // for nil return types, it is ok to not return a value explicitly.
+ if !is_nil {
let ends_with_stmt = match body.expr {
None if !body.stmts.is_empty() =>
match body.stmts.last().unwrap().node {
use hir::def_id::DefId;
use hir::map as ast_map;
-use infer;
+use infer::InferCtxt;
use middle::const_qualif::ConstQualif;
use hir::def::Def;
use ty::adjustment;
}
#[derive(Copy, Clone)]
-pub struct MemCategorizationContext<'t, 'a: 't, 'tcx : 'a> {
- pub typer: &'t infer::InferCtxt<'a, 'tcx>,
+pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
}
pub type McResult<T> = Result<T, ()>;
ret
}
- fn from_local(tcx: &TyCtxt, id: ast::NodeId) -> MutabilityCategory {
+ fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory {
let ret = match tcx.map.get(id) {
ast_map::NodeLocal(p) => match p.node {
PatKind::Ident(bind_mode, _, _) => {
}
}
-impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
- pub fn new(typer: &'t infer::InferCtxt<'a, 'tcx>) -> MemCategorizationContext<'t, 'a, 'tcx> {
- MemCategorizationContext { typer: typer }
+impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>)
+ -> MemCategorizationContext<'a, 'gcx, 'tcx> {
+ MemCategorizationContext { infcx: infcx }
}
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
- self.typer.tcx
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
+ self.infcx.tcx
}
fn expr_ty(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
- match self.typer.node_ty(expr.id) {
+ match self.infcx.node_ty(expr.id) {
Ok(t) => Ok(t),
Err(()) => {
debug!("expr_ty({:?}) yielded Err", expr);
let unadjusted_ty = self.expr_ty(expr)?;
Ok(unadjusted_ty.adjust(
self.tcx(), expr.span, expr.id,
- self.typer.adjustments().get(&expr.id),
- |method_call| self.typer.node_method_ty(method_call)))
+ self.infcx.adjustments().get(&expr.id),
+ |method_call| self.infcx.node_method_ty(method_call)))
}
fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
- self.typer.node_ty(id)
+ self.infcx.node_ty(id)
}
fn pat_ty(&self, pat: &hir::Pat) -> McResult<Ty<'tcx>> {
- let base_ty = self.typer.node_ty(pat.id)?;
+ let base_ty = self.infcx.node_ty(pat.id)?;
// FIXME (Issue #18207): This code detects whether we are
// looking at a `ref x`, and if so, figures out what the type
// *being borrowed* is. But ideally we would put in a more
}
pub fn cat_expr(&self, expr: &hir::Expr) -> McResult<cmt<'tcx>> {
- match self.typer.adjustments().get(&expr.id) {
+ match self.infcx.adjustments().get(&expr.id) {
None => {
// No adjustments.
self.cat_expr_unadjusted(expr)
hir::ExprIndex(ref base, _) => {
let method_call = ty::MethodCall::expr(expr.id());
let context = InteriorOffsetKind::Index;
- match self.typer.node_method_ty(method_call) {
+ match self.infcx.node_method_ty(method_call) {
Some(method_ty) => {
// If this is an index implemented by a method call, then it
// will include an implicit deref of the result.
let ty = self.node_ty(fn_node_id)?;
match ty.sty {
ty::TyClosure(closure_id, _) => {
- match self.typer.closure_kind(closure_id) {
+ match self.infcx.closure_kind(closure_id) {
Some(kind) => {
self.cat_upvar(id, span, var_id, fn_node_id, kind)
}
// for that.
let upvar_id = ty::UpvarId { var_id: var_id,
closure_expr_id: fn_node_id };
- let upvar_capture = self.typer.upvar_capture(upvar_id).unwrap();
+ let upvar_capture = self.infcx.upvar_capture(upvar_id).unwrap();
let cmt_result = match upvar_capture {
ty::UpvarCapture::ByValue => {
cmt_result
/// Returns the lifetime of a temporary created by expr with id `id`.
/// This could be `'static` if `id` is part of a constant expression.
pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region {
- match self.typer.temporary_scope(id) {
+ match self.infcx.temporary_scope(id) {
Some(scope) => ty::ReScope(scope),
None => ty::ReStatic
}
expr_id: node.id(),
autoderef: deref_cnt as u32
};
- let method_ty = self.typer.node_method_ty(method_call);
+ let method_ty = self.infcx.node_method_ty(method_call);
debug!("cat_deref: method_call={:?} method_ty={:?}",
method_call, method_ty.map(|ty| ty));
//! - `base_cmt`: the cmt of `elt`
let method_call = ty::MethodCall::expr(elt.id());
- let method_ty = self.typer.node_method_ty(method_call);
+ let method_ty = self.infcx.node_method_ty(method_call);
let element_ty = match method_ty {
Some(method_ty) => {
slice_pat: &hir::Pat)
-> McResult<(cmt<'tcx>, hir::Mutability, ty::Region)> {
let slice_ty = self.node_ty(slice_pat.id)?;
- let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(),
- slice_pat,
- slice_ty);
+ let (slice_mutbl, slice_r) = vec_slice_info(slice_pat, slice_ty);
let context = InteriorOffsetKind::Pattern;
let cmt_vec = self.deref_vec(slice_pat, vec_cmt, context)?;
let cmt_slice = self.cat_index(slice_pat, cmt_vec, context)?;
/// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b,
/// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we
/// have to recurse through rptrs.
- fn vec_slice_info(tcx: &TyCtxt,
- pat: &hir::Pat,
- slice_ty: Ty)
+ fn vec_slice_info(pat: &hir::Pat, slice_ty: Ty)
-> (hir::Mutability, ty::Region) {
match slice_ty.sty {
ty::TyRef(r, ref mt) => match mt.ty.sty {
ty::TySlice(_) => (mt.mutbl, *r),
- _ => vec_slice_info(tcx, pat, mt.ty),
+ _ => vec_slice_info(pat, mt.ty),
},
_ => {
}
pub fn cat_pattern<F>(&self, cmt: cmt<'tcx>, pat: &hir::Pat, mut op: F) -> McResult<()>
- where F: FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &hir::Pat),
+ where F: FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat),
{
self.cat_pattern_(cmt, pat, &mut op)
}
// FIXME(#19596) This is a workaround, but there should be a better way to do this
fn cat_pattern_<F>(&self, cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F)
-> McResult<()>
- where F : FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &hir::Pat),
+ where F : FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat),
{
// Here, `cmt` is the categorization for the value being
// matched and pat is the pattern it is being matched against.
}
/// Returns `FreelyAliasable(_)` if this lvalue represents a freely aliasable pointer type.
- pub fn freely_aliasable(&self, ctxt: &TyCtxt<'tcx>)
- -> Aliasability {
+ pub fn freely_aliasable(&self) -> Aliasability {
// Maybe non-obvious: copied upvars can only be considered
// non-aliasable in once closures, since any other kind can be
// aliased and eventually recused.
Categorization::Downcast(ref b, _) |
Categorization::Interior(ref b, _) => {
// Aliasability depends on base cmt
- b.freely_aliasable(ctxt)
+ b.freely_aliasable()
}
Categorization::Deref(ref b, _, Unique) => {
- let sub = b.freely_aliasable(ctxt);
+ let sub = b.freely_aliasable();
if b.mutbl.is_mutable() {
// Aliasability depends on base cmt alone
sub
}
- pub fn descriptive_string(&self, tcx: &TyCtxt) -> String {
+ pub fn descriptive_string(&self, tcx: TyCtxt) -> String {
match self.cat {
Categorization::StaticItem => {
"static item".to_string()
}
}
-fn method_might_be_inlined(tcx: &TyCtxt, sig: &hir::MethodSig,
- impl_item: &hir::ImplItem,
- impl_src: DefId) -> bool {
+fn method_might_be_inlined<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ sig: &hir::MethodSig,
+ impl_item: &hir::ImplItem,
+ impl_src: DefId) -> bool {
if attr::requests_inline(&impl_item.attrs) ||
generics_require_inlining(&sig.generics) {
return true
// Information needed while computing reachability.
struct ReachableContext<'a, 'tcx: 'a> {
// The type context.
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
// The set of items which must be exported in the linkage sense.
reachable_symbols: NodeSet,
// A worklist of item IDs. Each item ID in this worklist will be inlined
impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
// Creates a new reachability computation context.
- fn new(tcx: &'a TyCtxt<'tcx>) -> ReachableContext<'a, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ReachableContext<'a, 'tcx> {
let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| {
*ty != config::CrateTypeExecutable
});
}
}
-pub fn find_reachable(tcx: &TyCtxt,
- access_levels: &privacy::AccessLevels)
- -> NodeSet {
+pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ access_levels: &privacy::AccessLevels)
+ -> NodeSet {
let _task = tcx.dep_graph.in_task(DepNode::Reachability);
let mut reachable_context = ReachableContext::new(tcx);
})
}
FnKind::Closure(_) => {
- self.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
+ // Closures have their own set of labels, save labels just
+ // like for foreign items above.
+ let saved = replace(&mut self.labels_in_fn, vec![]);
+ let result = self.add_scope_and_walk_fn(fk, fd, b, s, fn_id);
+ replace(&mut self.labels_in_fn, saved);
+ result
}
}
}
// A private tree-walker for producing an Index.
struct Annotator<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
index: &'a mut Index<'tcx>,
parent_stab: Option<&'tcx Stability>,
parent_depr: Option<Deprecation>,
/// nested items in the context of the outer item, so enable
/// deep-walking.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, i: &Item) {
}
}
-impl<'tcx> Index<'tcx> {
+impl<'a, 'tcx> Index<'tcx> {
/// Construct the stability index for a crate being compiled.
- pub fn build(&mut self, tcx: &TyCtxt<'tcx>, access_levels: &AccessLevels) {
+ pub fn build(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &AccessLevels) {
let _task = tcx.dep_graph.in_task(DepNode::StabilityIndex);
let krate = tcx.map.krate();
let mut annotator = Annotator {
/// Cross-references the feature names of unstable APIs with enabled
/// features and possibly prints errors. Returns a list of all
/// features used.
-pub fn check_unstable_api_usage(tcx: &TyCtxt)
- -> FnvHashMap<InternedString, StabilityLevel> {
+pub fn check_unstable_api_usage<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> FnvHashMap<InternedString, StabilityLevel> {
let _task = tcx.dep_graph.in_task(DepNode::StabilityCheck);
let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features;
}
struct Checker<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
active_features: FnvHashSet<InternedString>,
used_features: FnvHashMap<InternedString, StabilityLevel>,
// Within a block where feature gate checking can be skipped.
/// nested items in the context of the outer item, so enable
/// deep-walking.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, item: &hir::Item) {
}
/// Helper for discovering nodes to check for stability
-pub fn check_item(tcx: &TyCtxt, item: &hir::Item, warn_about_defns: bool,
- cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
+pub fn check_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ item: &hir::Item,
+ warn_about_defns: bool,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
match item.node {
hir::ItemExternCrate(_) => {
// compiler-generated `extern crate` items have a dummy span.
}
/// Helper for discovering nodes to check for stability
-pub fn check_expr(tcx: &TyCtxt, e: &hir::Expr,
- cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
+pub fn check_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, e: &hir::Expr,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
let span;
let id = match e.node {
hir::ExprMethodCall(i, _, _) => {
maybe_do_stability_check(tcx, id, span, cb);
}
-pub fn check_path(tcx: &TyCtxt, path: &hir::Path, id: ast::NodeId,
- cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
+pub fn check_path<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ path: &hir::Path, id: ast::NodeId,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
match tcx.def_map.borrow().get(&id).map(|d| d.full_def()) {
Some(Def::PrimTy(..)) => {}
Some(Def::SelfTy(..)) => {}
}
}
-pub fn check_path_list_item(tcx: &TyCtxt, item: &hir::PathListItem,
- cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
+pub fn check_path_list_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ item: &hir::PathListItem,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
match tcx.def_map.borrow().get(&item.node.id()).map(|d| d.full_def()) {
Some(Def::PrimTy(..)) => {}
Some(def) => {
}
}
-pub fn check_pat(tcx: &TyCtxt, pat: &hir::Pat,
- cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option<Deprecation>)) {
+pub fn check_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &hir::Pat,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
debug!("check_pat(pat = {:?})", pat);
if is_internal(tcx, pat.span) { return; }
}
}
-fn maybe_do_stability_check(tcx: &TyCtxt, id: DefId, span: Span,
- cb: &mut FnMut(DefId, Span,
- &Option<&Stability>, &Option<Deprecation>)) {
+fn maybe_do_stability_check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ id: DefId, span: Span,
+ cb: &mut FnMut(DefId, Span,
+ &Option<&Stability>,
+ &Option<Deprecation>)) {
if is_internal(tcx, span) {
debug!("maybe_do_stability_check: \
skipping span={:?} since it is internal", span);
return;
}
let (stability, deprecation) = if is_staged_api(tcx, id) {
- (lookup_stability(tcx, id), None)
+ (tcx.lookup_stability(id), None)
} else {
- (None, lookup_deprecation(tcx, id))
+ (None, tcx.lookup_deprecation(id))
};
debug!("maybe_do_stability_check: \
inspecting id={:?} span={:?} of stability={:?}", id, span, stability);
cb(id, span, &stability, &deprecation);
}
-fn is_internal(tcx: &TyCtxt, span: Span) -> bool {
+fn is_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span) -> bool {
tcx.sess.codemap().span_allows_unstable(span)
}
-fn is_staged_api(tcx: &TyCtxt, id: DefId) -> bool {
+fn is_staged_api<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> bool {
match tcx.trait_item_of_item(id) {
Some(ty::MethodTraitItemId(trait_method_id))
if trait_method_id != id => {
}
}
-/// Lookup the stability for a node, loading external crate
-/// metadata as necessary.
-pub fn lookup_stability<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
- if let Some(st) = tcx.stability.borrow().stab_map.get(&id) {
- return *st;
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ /// Lookup the stability for a node, loading external crate
+ /// metadata as necessary.
+ pub fn lookup_stability(self, id: DefId) -> Option<&'tcx Stability> {
+ if let Some(st) = self.stability.borrow().stab_map.get(&id) {
+ return *st;
+ }
+
+ let st = self.lookup_stability_uncached(id);
+ self.stability.borrow_mut().stab_map.insert(id, st);
+ st
}
- let st = lookup_stability_uncached(tcx, id);
- tcx.stability.borrow_mut().stab_map.insert(id, st);
- st
-}
+ pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> {
+ if let Some(depr) = self.stability.borrow().depr_map.get(&id) {
+ return depr.clone();
+ }
-pub fn lookup_deprecation<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<Deprecation> {
- if let Some(depr) = tcx.stability.borrow().depr_map.get(&id) {
- return depr.clone();
+ let depr = self.lookup_deprecation_uncached(id);
+ self.stability.borrow_mut().depr_map.insert(id, depr.clone());
+ depr
}
- let depr = lookup_deprecation_uncached(tcx, id);
- tcx.stability.borrow_mut().depr_map.insert(id, depr.clone());
- depr
-}
-
-fn lookup_stability_uncached<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<&'tcx Stability> {
- debug!("lookup(id={:?})", id);
- if id.is_local() {
- None // The stability cache is filled partially lazily
- } else {
- tcx.sess.cstore.stability(id).map(|st| tcx.intern_stability(st))
+ fn lookup_stability_uncached(self, id: DefId) -> Option<&'tcx Stability> {
+ debug!("lookup(id={:?})", id);
+ if id.is_local() {
+ None // The stability cache is filled partially lazily
+ } else {
+ self.sess.cstore.stability(id).map(|st| self.intern_stability(st))
+ }
}
-}
-fn lookup_deprecation_uncached<'tcx>(tcx: &TyCtxt<'tcx>, id: DefId) -> Option<Deprecation> {
- debug!("lookup(id={:?})", id);
- if id.is_local() {
- None // The stability cache is filled partially lazily
- } else {
- tcx.sess.cstore.deprecation(id)
+ fn lookup_deprecation_uncached(self, id: DefId) -> Option<Deprecation> {
+ debug!("lookup(id={:?})", id);
+ if id.is_local() {
+ None // The stability cache is filled partially lazily
+ } else {
+ self.sess.cstore.deprecation(id)
+ }
}
}
//! Validity checking for weak lang items
-use session::config;
+use session::config::{self, PanicStrategy};
use session::Session;
use middle::lang_items;
config::CrateTypeRlib => false,
}
});
- if !needs_check { return }
+ if !needs_check {
+ return
+ }
let mut missing = HashSet::new();
for cnum in sess.cstore.crates() {
}
}
+ // If we're not compiling with unwinding, we won't actually need these
+ // symbols. Other panic runtimes ensure that the relevant symbols are
+ // available to link things together, but they're never exercised.
+ let mut whitelisted = HashSet::new();
+ if sess.opts.cg.panic != PanicStrategy::Unwind {
+ whitelisted.insert(lang_items::EhPersonalityLangItem);
+ whitelisted.insert(lang_items::EhUnwindResumeLangItem);
+ }
+
$(
- if missing.contains(&lang_items::$item) && items.$name().is_none() {
+ if missing.contains(&lang_items::$item) &&
+ !whitelisted.contains(&lang_items::$item) &&
+ items.$name().is_none() {
sess.err(&format!("language item required, but not found: `{}`",
stringify!($name)));
Vec,
Tuple,
Adt(AdtDef<'tcx>, usize, &'tcx Substs<'tcx>),
- Closure(DefId, &'tcx ClosureSubsts<'tcx>),
+ Closure(DefId, ClosureSubsts<'tcx>),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
variant_index: usize },
}
-impl<'tcx> LvalueTy<'tcx> {
+impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> {
pub fn from_ty(ty: Ty<'tcx>) -> LvalueTy<'tcx> {
LvalueTy::Ty { ty: ty }
}
- pub fn to_ty(&self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match *self {
LvalueTy::Ty { ty } =>
ty,
}
}
- pub fn projection_ty(self,
- tcx: &TyCtxt<'tcx>,
+ pub fn projection_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
elem: &LvalueElem<'tcx>)
-> LvalueTy<'tcx>
{
}
impl<'tcx> TypeFoldable<'tcx> for LvalueTy<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.fold_with(folder) },
LvalueTy::Downcast { adt_def, substs, variant_index } => {
- let substs = substs.fold_with(folder);
LvalueTy::Downcast {
adt_def: adt_def,
- substs: folder.tcx().mk_substs(substs),
+ substs: substs.fold_with(folder),
variant_index: variant_index
}
}
}
}
-impl<'tcx> Mir<'tcx> {
- pub fn operand_ty(&self,
- tcx: &TyCtxt<'tcx>,
+impl<'a, 'gcx, 'tcx> Mir<'tcx> {
+ pub fn operand_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
operand: &Operand<'tcx>)
-> Ty<'tcx>
{
}
}
- pub fn binop_ty(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn binop_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
op: BinOp,
lhs_ty: Ty<'tcx>,
rhs_ty: Ty<'tcx>)
}
}
- pub fn lvalue_ty(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn lvalue_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
lvalue: &Lvalue<'tcx>)
-> LvalueTy<'tcx>
{
}
}
- pub fn rvalue_ty(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn rvalue_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
rvalue: &Rvalue<'tcx>)
-> Option<Ty<'tcx>>
{
))
}
AggregateKind::Adt(def, _, substs) => {
- Some(def.type_scheme(tcx).ty.subst(tcx, substs))
+ Some(tcx.lookup_item_type(def.did).ty.subst(tcx, substs))
}
AggregateKind::Closure(did, substs) => {
- Some(tcx.mk_closure_from_closure_substs(
- did, Box::new(substs.clone())))
+ Some(tcx.mk_closure_from_closure_substs(did, substs))
}
}
}
Promoted(NodeId, usize)
}
-impl MirSource {
- pub fn from_node(tcx: &TyCtxt, id: NodeId) -> MirSource {
+impl<'a, 'tcx> MirSource {
+ pub fn from_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: NodeId) -> MirSource {
use hir::*;
// Handle constants in enum discriminants, types, and repeat expressions.
/// A pass which inspects the whole MirMap.
pub trait MirMapPass<'tcx>: Pass {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>);
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>);
}
/// A pass which inspects Mir of functions in isolation.
pub trait MirPass<'tcx>: Pass {
- fn run_pass_on_promoted(&mut self, tcx: &TyCtxt<'tcx>,
- item_id: NodeId, index: usize,
- mir: &mut Mir<'tcx>) {
+ fn run_pass_on_promoted<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ item_id: NodeId, index: usize,
+ mir: &mut Mir<'tcx>) {
self.run_pass(tcx, MirSource::Promoted(item_id, index), mir);
}
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, src: MirSource, mir: &mut Mir<'tcx>);
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ src: MirSource, mir: &mut Mir<'tcx>);
}
impl<'tcx, T: MirPass<'tcx>> MirMapPass<'tcx> for T {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>) {
for (&id, mir) in &mut map.map {
let def_id = tcx.map.local_def_id(id);
let _task = tcx.dep_graph.in_task(self.dep_node(def_id));
plugin_passes: Vec<Box<for<'tcx> MirMapPass<'tcx>>>
}
-impl Passes {
+impl<'a, 'tcx> Passes {
pub fn new() -> Passes {
let passes = Passes {
passes: Vec::new(),
passes
}
- pub fn run_passes<'tcx>(&mut self, pcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>) {
+ pub fn run_passes(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>) {
for pass in &mut self.plugin_passes {
- pass.run_pass(pcx, map);
+ pass.run_pass(tcx, map);
}
for pass in &mut self.passes {
- pass.run_pass(pcx, map);
+ pass.run_pass(tcx, map);
}
}
/// Pushes a built-in pass.
- pub fn push_pass(&mut self, pass: Box<for<'a> MirMapPass<'a>>) {
+ pub fn push_pass(&mut self, pass: Box<for<'b> MirMapPass<'b>>) {
self.passes.push(pass);
}
}
}
fn visit_closure_substs(&mut self,
- substs: & $($mutability)* &'tcx ClosureSubsts<'tcx>) {
+ substs: & $($mutability)* ClosureSubsts<'tcx>) {
self.super_closure_substs(substs);
}
}
fn super_closure_substs(&mut self,
- _substs: & $($mutability)* &'tcx ClosureSubsts<'tcx>) {
+ _substs: & $($mutability)* ClosureSubsts<'tcx>) {
}
fn super_const_val(&mut self, _substs: & $($mutability)* ConstVal) {
}
}
+#[derive(Clone, PartialEq)]
+pub enum PanicStrategy {
+ Unwind,
+ Abort,
+}
+
+impl PanicStrategy {
+ pub fn desc(&self) -> &str {
+ match *self {
+ PanicStrategy::Unwind => "unwind",
+ PanicStrategy::Abort => "abort",
+ }
+ }
+}
+
/// Declare a macro that will define all CodegenOptions/DebuggingOptions fields and parsers all
/// at once. The goal of this macro is to define an interface that can be
/// programmatically used by the option parser in order to initialize the struct
Some("a space-separated list of passes, or `all`");
pub const parse_opt_uint: Option<&'static str> =
Some("a number");
+ pub const parse_panic_strategy: Option<&'static str> =
+ Some("either `panic` or `abort`");
}
#[allow(dead_code)]
mod $mod_set {
- use super::{$struct_name, Passes, SomePasses, AllPasses};
+ use super::{$struct_name, Passes, SomePasses, AllPasses, PanicStrategy};
$(
pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool {
}
}
}
+
+ fn parse_panic_strategy(slot: &mut PanicStrategy, v: Option<&str>) -> bool {
+ match v {
+ Some("unwind") => *slot = PanicStrategy::Unwind,
+ Some("abort") => *slot = PanicStrategy::Abort,
+ _ => return false
+ }
+ true
+ }
}
) }
"explicitly enable the cfg(debug_assertions) directive"),
inline_threshold: Option<usize> = (None, parse_opt_uint,
"set the inlining threshold for"),
+ panic: PanicStrategy = (PanicStrategy::Unwind, parse_panic_strategy,
+ "panic strategy to compile crate with"),
}
use middle::cstore::CrateStore;
use middle::dependency_format;
use session::search_paths::PathKind;
+use session::config::PanicStrategy;
use ty::tls;
use util::nodemap::{NodeMap, FnvHashMap};
use mir::transform as mir_pass;
/// operations such as auto-dereference and monomorphization.
pub recursion_limit: Cell<usize>,
- /// The metadata::creader module may inject an allocator dependency if it
- /// didn't already find one, and this tracks what was injected.
+ /// The metadata::creader module may inject an allocator/panic_runtime
+ /// dependency if it didn't already find one, and this tracks what was
+ /// injected.
pub injected_allocator: Cell<Option<ast::CrateNum>>,
+ pub injected_panic_runtime: Cell<Option<ast::CrateNum>>,
/// Names of all bang-style macros and syntax extensions
/// available in this crate
self.opts.cg.lto
}
pub fn no_landing_pads(&self) -> bool {
- self.opts.debugging_opts.no_landing_pads
+ self.opts.debugging_opts.no_landing_pads ||
+ self.opts.cg.panic == PanicStrategy::Abort
}
pub fn unstable_options(&self) -> bool {
self.opts.debugging_opts.unstable_options
recursion_limit: Cell::new(64),
next_node_id: Cell::new(1),
injected_allocator: Cell::new(None),
+ injected_panic_runtime: Cell::new(None),
available_macros: RefCell::new(HashSet::new()),
imported_macro_spans: RefCell::new(HashMap::new()),
};
use hir::def_id::DefId;
use ty::subst::TypeSpace;
use ty::{self, Ty, TyCtxt};
-use infer::{self, InferCtxt, TypeOrigin};
+use infer::{InferCtxt, TypeOrigin};
use syntax::codemap::DUMMY_SP;
#[derive(Copy, Clone)]
/// If there are types that satisfy both impls, returns a suitably-freshened
/// `ImplHeader` with those types substituted
-pub fn overlapping_impls<'cx, 'tcx>(infcx: &InferCtxt<'cx, 'tcx>,
- impl1_def_id: DefId,
- impl2_def_id: DefId)
- -> Option<ty::ImplHeader<'tcx>>
+pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId)
+ -> Option<ty::ImplHeader<'tcx>>
{
debug!("impl_can_satisfy(\
impl1_def_id={:?}, \
/// Can both impl `a` and impl `b` be satisfied by a common type (including
/// `where` clauses)? If so, returns an `ImplHeader` that unifies the two impls.
-fn overlap<'cx, 'tcx>(selcx: &mut SelectionContext<'cx, 'tcx>,
- a_def_id: DefId,
- b_def_id: DefId)
- -> Option<ty::ImplHeader<'tcx>>
+fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
+ a_def_id: DefId,
+ b_def_id: DefId)
+ -> Option<ty::ImplHeader<'tcx>>
{
debug!("overlap(a_def_id={:?}, b_def_id={:?})",
a_def_id,
debug!("overlap: b_impl_header={:?}", b_impl_header);
// Do `a` and `b` unify? If not, no overlap.
- if let Err(_) = infer::mk_eq_impl_headers(selcx.infcx(),
- true,
- TypeOrigin::Misc(DUMMY_SP),
- &a_impl_header,
- &b_impl_header) {
+ if let Err(_) = selcx.infcx().eq_impl_headers(true,
+ TypeOrigin::Misc(DUMMY_SP),
+ &a_impl_header,
+ &b_impl_header) {
return None;
}
Some(selcx.infcx().resolve_type_vars_if_possible(&a_impl_header))
}
-pub fn trait_ref_is_knowable<'tcx>(tcx: &TyCtxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
+pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ trait_ref: &ty::TraitRef<'tcx>) -> bool
{
debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref);
///
/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
/// 2. Some local type must appear in `Self`.
-pub fn orphan_check<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_def_id: DefId)
- -> Result<(), OrphanCheckErr<'tcx>>
+pub fn orphan_check<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId)
+ -> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check({:?})", impl_def_id);
orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
}
-fn orphan_check_trait_ref<'tcx>(tcx: &TyCtxt<'tcx>,
+fn orphan_check_trait_ref<'tcx>(tcx: TyCtxt,
trait_ref: &ty::TraitRef<'tcx>,
infer_is_local: InferIsLocal)
-> Result<(), OrphanCheckErr<'tcx>>
return Err(OrphanCheckErr::NoLocalInputType);
}
-fn uncovered_tys<'tcx>(tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- infer_is_local: InferIsLocal)
- -> Vec<Ty<'tcx>>
-{
+fn uncovered_tys<'tcx>(tcx: TyCtxt, ty: Ty<'tcx>, infer_is_local: InferIsLocal)
+ -> Vec<Ty<'tcx>> {
if ty_is_local_constructor(tcx, ty, infer_is_local) {
vec![]
} else if fundamental_ty(tcx, ty) {
}
}
-fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
+fn is_type_parameter(ty: Ty) -> bool {
match ty.sty {
// FIXME(#20590) straighten story about projection types
ty::TyProjection(..) | ty::TyParam(..) => true,
}
}
-fn ty_is_local<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
-{
+fn ty_is_local(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal) -> bool {
ty_is_local_constructor(tcx, ty, infer_is_local) ||
fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
}
-fn fundamental_ty<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool
-{
+fn fundamental_ty(tcx: TyCtxt, ty: Ty) -> bool {
match ty.sty {
ty::TyBox(..) | ty::TyRef(..) =>
true,
}
}
-fn ty_is_local_constructor<'tcx>(tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- infer_is_local: InferIsLocal)
- -> bool
-{
+fn ty_is_local_constructor(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal)-> bool {
debug!("ty_is_local_constructor({:?})", ty);
match ty.sty {
SelectionError,
ObjectSafetyViolation,
MethodViolationCode,
- object_safety_violations,
};
use fmt_macros::{Parser, Piece, Position};
predicate: ty::Predicate<'tcx>
}
-impl<'tcx> TraitErrorKey<'tcx> {
- fn from_error<'a>(infcx: &InferCtxt<'a, 'tcx>,
- e: &FulfillmentError<'tcx>,
- warning_node_id: Option<ast::NodeId>) -> Self {
+impl<'a, 'gcx, 'tcx> TraitErrorKey<'tcx> {
+ fn from_error(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ e: &FulfillmentError<'tcx>,
+ warning_node_id: Option<ast::NodeId>) -> Self {
let predicate =
infcx.resolve_type_vars_if_possible(&e.obligation.predicate);
TraitErrorKey {
}
}
-pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- errors: &Vec<FulfillmentError<'tcx>>) {
- for error in errors {
- report_fulfillment_error(infcx, error, None);
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ pub fn report_fulfillment_errors(&self, errors: &Vec<FulfillmentError<'tcx>>) {
+ for error in errors {
+ self.report_fulfillment_error(error, None);
+ }
}
-}
-pub fn report_fulfillment_errors_as_warnings<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- errors: &Vec<FulfillmentError<'tcx>>,
- node_id: ast::NodeId)
-{
- for error in errors {
- report_fulfillment_error(infcx, error, Some(node_id));
+ pub fn report_fulfillment_errors_as_warnings(&self,
+ errors: &Vec<FulfillmentError<'tcx>>,
+ node_id: ast::NodeId) {
+ for error in errors {
+ self.report_fulfillment_error(error, Some(node_id));
+ }
}
-}
-fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- error: &FulfillmentError<'tcx>,
- warning_node_id: Option<ast::NodeId>) {
- let error_key = TraitErrorKey::from_error(infcx, error, warning_node_id);
- debug!("report_fulfillment_errors({:?}) - key={:?}",
- error, error_key);
- if !infcx.reported_trait_errors.borrow_mut().insert(error_key) {
- debug!("report_fulfillment_errors: skipping duplicate");
- return;
- }
- match error.code {
- FulfillmentErrorCode::CodeSelectionError(ref e) => {
- report_selection_error(infcx, &error.obligation, e, warning_node_id);
- }
- FulfillmentErrorCode::CodeProjectionError(ref e) => {
- report_projection_error(infcx, &error.obligation, e, warning_node_id);
- }
- FulfillmentErrorCode::CodeAmbiguity => {
- maybe_report_ambiguity(infcx, &error.obligation);
+ fn report_fulfillment_error(&self,
+ error: &FulfillmentError<'tcx>,
+ warning_node_id: Option<ast::NodeId>) {
+ let error_key = TraitErrorKey::from_error(self, error, warning_node_id);
+ debug!("report_fulfillment_errors({:?}) - key={:?}",
+ error, error_key);
+ if !self.reported_trait_errors.borrow_mut().insert(error_key) {
+ debug!("report_fulfillment_errors: skipping duplicate");
+ return;
+ }
+ match error.code {
+ FulfillmentErrorCode::CodeSelectionError(ref e) => {
+ self.report_selection_error(&error.obligation, e, warning_node_id);
+ }
+ FulfillmentErrorCode::CodeProjectionError(ref e) => {
+ self.report_projection_error(&error.obligation, e, warning_node_id);
+ }
+ FulfillmentErrorCode::CodeAmbiguity => {
+ self.maybe_report_ambiguity(&error.obligation);
+ }
}
}
-}
-pub fn report_projection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>,
- error: &MismatchedProjectionTypes<'tcx>,
- warning_node_id: Option<ast::NodeId>)
-{
- let predicate =
- infcx.resolve_type_vars_if_possible(&obligation.predicate);
-
- if !predicate.references_error() {
- if let Some(warning_node_id) = warning_node_id {
- infcx.tcx.sess.add_lint(
- ::lint::builtin::UNSIZED_IN_TUPLE,
- warning_node_id,
- obligation.cause.span,
- format!("type mismatch resolving `{}`: {}",
- predicate,
- error.err));
- } else {
- let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0271,
- "type mismatch resolving `{}`: {}",
- predicate,
- error.err);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ fn report_projection_error(&self,
+ obligation: &PredicateObligation<'tcx>,
+ error: &MismatchedProjectionTypes<'tcx>,
+ warning_node_id: Option<ast::NodeId>)
+ {
+ let predicate =
+ self.resolve_type_vars_if_possible(&obligation.predicate);
+
+ if !predicate.references_error() {
+ if let Some(warning_node_id) = warning_node_id {
+ self.tcx.sess.add_lint(
+ ::lint::builtin::UNSIZED_IN_TUPLE,
+ warning_node_id,
+ obligation.cause.span,
+ format!("type mismatch resolving `{}`: {}",
+ predicate,
+ error.err));
+ } else {
+ let mut err = struct_span_err!(self.tcx.sess, obligation.cause.span, E0271,
+ "type mismatch resolving `{}`: {}",
+ predicate,
+ error.err);
+ self.note_obligation_cause(&mut err, obligation);
+ err.emit();
+ }
}
}
-}
-fn on_unimplemented_note<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>,
- span: Span) -> Option<String> {
- let trait_ref = trait_ref.skip_binder();
- let def_id = trait_ref.def_id;
- let mut report = None;
- for item in infcx.tcx.get_attrs(def_id).iter() {
- if item.check_name("rustc_on_unimplemented") {
- let err_sp = item.meta().span.substitute_dummy(span);
- let def = infcx.tcx.lookup_trait_def(def_id);
- let trait_str = def.trait_ref.to_string();
- if let Some(ref istring) = item.value_str() {
- let mut generic_map = def.generics.types.iter_enumerated()
- .map(|(param, i, gen)| {
- (gen.name.as_str().to_string(),
- trait_ref.substs.types.get(param, i)
- .to_string())
- }).collect::<FnvHashMap<String, String>>();
- generic_map.insert("Self".to_string(),
- trait_ref.self_ty().to_string());
- let parser = Parser::new(&istring);
- let mut errored = false;
- let err: String = parser.filter_map(|p| {
- match p {
- Piece::String(s) => Some(s),
- Piece::NextArgument(a) => match a.position {
- Position::ArgumentNamed(s) => match generic_map.get(s) {
- Some(val) => Some(val),
- None => {
- span_err!(infcx.tcx.sess, err_sp, E0272,
- "the #[rustc_on_unimplemented] \
- attribute on \
- trait definition for {} refers to \
- non-existent type parameter {}",
- trait_str, s);
+ fn on_unimplemented_note(&self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ span: Span) -> Option<String> {
+ let trait_ref = trait_ref.skip_binder();
+ let def_id = trait_ref.def_id;
+ let mut report = None;
+ for item in self.tcx.get_attrs(def_id).iter() {
+ if item.check_name("rustc_on_unimplemented") {
+ let err_sp = item.meta().span.substitute_dummy(span);
+ let def = self.tcx.lookup_trait_def(def_id);
+ let trait_str = def.trait_ref.to_string();
+ if let Some(ref istring) = item.value_str() {
+ let mut generic_map = def.generics.types.iter_enumerated()
+ .map(|(param, i, gen)| {
+ (gen.name.as_str().to_string(),
+ trait_ref.substs.types.get(param, i)
+ .to_string())
+ }).collect::<FnvHashMap<String, String>>();
+ generic_map.insert("Self".to_string(),
+ trait_ref.self_ty().to_string());
+ let parser = Parser::new(&istring);
+ let mut errored = false;
+ let err: String = parser.filter_map(|p| {
+ match p {
+ Piece::String(s) => Some(s),
+ Piece::NextArgument(a) => match a.position {
+ Position::ArgumentNamed(s) => match generic_map.get(s) {
+ Some(val) => Some(val),
+ None => {
+ span_err!(self.tcx.sess, err_sp, E0272,
+ "the #[rustc_on_unimplemented] \
+ attribute on \
+ trait definition for {} refers to \
+ non-existent type parameter {}",
+ trait_str, s);
+ errored = true;
+ None
+ }
+ },
+ _ => {
+ span_err!(self.tcx.sess, err_sp, E0273,
+ "the #[rustc_on_unimplemented] attribute \
+ on trait definition for {} must have \
+ named format arguments, eg \
+ `#[rustc_on_unimplemented = \
+ \"foo {{T}}\"]`", trait_str);
errored = true;
None
}
- },
- _ => {
- span_err!(infcx.tcx.sess, err_sp, E0273,
- "the #[rustc_on_unimplemented] \
- attribute on \
- trait definition for {} must have named \
- format arguments, \
- eg `#[rustc_on_unimplemented = \
- \"foo {{T}}\"]`",
- trait_str);
- errored = true;
- None
}
}
+ }).collect();
+ // Report only if the format string checks out
+ if !errored {
+ report = Some(err);
}
- }).collect();
- // Report only if the format string checks out
- if !errored {
- report = Some(err);
+ } else {
+ span_err!(self.tcx.sess, err_sp, E0274,
+ "the #[rustc_on_unimplemented] attribute on \
+ trait definition for {} must have a value, \
+ eg `#[rustc_on_unimplemented = \"foo\"]`",
+ trait_str);
}
- } else {
- span_err!(infcx.tcx.sess, err_sp, E0274,
- "the #[rustc_on_unimplemented] attribute on \
- trait definition for {} must have a value, \
- eg `#[rustc_on_unimplemented = \"foo\"]`",
- trait_str);
+ break;
}
- break;
}
+ report
}
- report
-}
-fn find_similar_impl_candidates<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>)
- -> Vec<ty::TraitRef<'tcx>>
-{
- let simp = fast_reject::simplify_type(infcx.tcx,
- trait_ref.skip_binder().self_ty(),
- true);
- let mut impl_candidates = Vec::new();
- let trait_def = infcx.tcx.lookup_trait_def(trait_ref.def_id());
-
- match simp {
- Some(simp) => trait_def.for_each_impl(infcx.tcx, |def_id| {
- let imp = infcx.tcx.impl_trait_ref(def_id).unwrap();
- let imp_simp = fast_reject::simplify_type(infcx.tcx,
- imp.self_ty(),
- true);
- if let Some(imp_simp) = imp_simp {
- if simp != imp_simp {
- return;
+ fn report_similar_impl_candidates(&self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ err: &mut DiagnosticBuilder)
+ {
+ let simp = fast_reject::simplify_type(self.tcx,
+ trait_ref.skip_binder().self_ty(),
+ true);
+ let mut impl_candidates = Vec::new();
+ let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id());
+
+ match simp {
+ Some(simp) => trait_def.for_each_impl(self.tcx, |def_id| {
+ let imp = self.tcx.impl_trait_ref(def_id).unwrap();
+ let imp_simp = fast_reject::simplify_type(self.tcx,
+ imp.self_ty(),
+ true);
+ if let Some(imp_simp) = imp_simp {
+ if simp != imp_simp {
+ return;
+ }
}
- }
- impl_candidates.push(imp);
- }),
- None => trait_def.for_each_impl(infcx.tcx, |def_id| {
- impl_candidates.push(
- infcx.tcx.impl_trait_ref(def_id).unwrap());
- })
- };
- impl_candidates
-}
+ impl_candidates.push(imp);
+ }),
+ None => trait_def.for_each_impl(self.tcx, |def_id| {
+ impl_candidates.push(
+ self.tcx.impl_trait_ref(def_id).unwrap());
+ })
+ };
-fn report_similar_impl_candidates(err: &mut DiagnosticBuilder,
- impl_candidates: &[ty::TraitRef])
-{
- err.help(&format!("the following implementations were found:"));
+ if impl_candidates.is_empty() {
+ return;
+ }
- let end = cmp::min(4, impl_candidates.len());
- for candidate in &impl_candidates[0..end] {
- err.help(&format!(" {:?}", candidate));
- }
- if impl_candidates.len() > 4 {
- err.help(&format!("and {} others", impl_candidates.len()-4));
- }
-}
+ err.help(&format!("the following implementations were found:"));
-/// Reports that an overflow has occurred and halts compilation. We
-/// halt compilation unconditionally because it is important that
-/// overflows never be masked -- they basically represent computations
-/// whose result could not be truly determined and thus we can't say
-/// if the program type checks or not -- and they are unusual
-/// occurrences in any case.
-pub fn report_overflow_error<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
- obligation: &Obligation<'tcx, T>,
- suggest_increasing_limit: bool)
- -> !
- where T: fmt::Display + TypeFoldable<'tcx>
-{
- let predicate =
- infcx.resolve_type_vars_if_possible(&obligation.predicate);
- let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0275,
- "overflow evaluating the requirement `{}`",
- predicate);
-
- if suggest_increasing_limit {
- suggest_new_overflow_limit(infcx.tcx, &mut err);
+ let end = cmp::min(4, impl_candidates.len());
+ for candidate in &impl_candidates[0..end] {
+ err.help(&format!(" {:?}", candidate));
+ }
+ if impl_candidates.len() > 4 {
+ err.help(&format!("and {} others", impl_candidates.len()-4));
+ }
}
- note_obligation_cause(infcx, &mut err, obligation);
+ /// Reports that an overflow has occurred and halts compilation. We
+ /// halt compilation unconditionally because it is important that
+ /// overflows never be masked -- they basically represent computations
+ /// whose result could not be truly determined and thus we can't say
+ /// if the program type checks or not -- and they are unusual
+ /// occurrences in any case.
+ pub fn report_overflow_error<T>(&self,
+ obligation: &Obligation<'tcx, T>,
+ suggest_increasing_limit: bool) -> !
+ where T: fmt::Display + TypeFoldable<'tcx>
+ {
+ let predicate =
+ self.resolve_type_vars_if_possible(&obligation.predicate);
+ let mut err = struct_span_err!(self.tcx.sess, obligation.cause.span, E0275,
+ "overflow evaluating the requirement `{}`",
+ predicate);
- err.emit();
- infcx.tcx.sess.abort_if_errors();
- bug!();
-}
+ if suggest_increasing_limit {
+ self.suggest_new_overflow_limit(&mut err);
+ }
-/// Reports that a cycle was detected which led to overflow and halts
-/// compilation. This is equivalent to `report_overflow_error` except
-/// that we can give a more helpful error message (and, in particular,
-/// we do not suggest increasing the overflow limit, which is not
-/// going to help).
-pub fn report_overflow_error_cycle<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- cycle: &Vec<PredicateObligation<'tcx>>)
- -> !
-{
- assert!(cycle.len() > 1);
+ self.note_obligation_cause(&mut err, obligation);
- debug!("report_overflow_error_cycle(cycle length = {})", cycle.len());
+ err.emit();
+ self.tcx.sess.abort_if_errors();
+ bug!();
+ }
- let cycle = infcx.resolve_type_vars_if_possible(cycle);
+ /// Reports that a cycle was detected which led to overflow and halts
+ /// compilation. This is equivalent to `report_overflow_error` except
+ /// that we can give a more helpful error message (and, in particular,
+ /// we do not suggest increasing the overflow limit, which is not
+ /// going to help).
+ pub fn report_overflow_error_cycle(&self, cycle: &Vec<PredicateObligation<'tcx>>) -> ! {
+ assert!(cycle.len() > 1);
- debug!("report_overflow_error_cycle: cycle={:?}", cycle);
+ debug!("report_overflow_error_cycle(cycle length = {})", cycle.len());
- assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate);
+ let cycle = self.resolve_type_vars_if_possible(cycle);
- try_report_overflow_error_type_of_infinite_size(infcx, &cycle);
- report_overflow_error(infcx, &cycle[0], false);
-}
+ debug!("report_overflow_error_cycle: cycle={:?}", cycle);
-/// If a cycle results from evaluated whether something is Sized, that
-/// is a particular special case that always results from a struct or
-/// enum definition that lacks indirection (e.g., `struct Foo { x: Foo
-/// }`). We wish to report a targeted error for this case.
-pub fn try_report_overflow_error_type_of_infinite_size<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
- cycle: &[PredicateObligation<'tcx>])
-{
- let sized_trait = match infcx.tcx.lang_items.sized_trait() {
- Some(v) => v,
- None => return,
- };
- let top_is_sized = {
- match cycle[0].predicate {
- ty::Predicate::Trait(ref data) => data.def_id() == sized_trait,
- _ => false,
- }
- };
- if !top_is_sized {
- return;
+ assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate);
+
+ self.try_report_overflow_error_type_of_infinite_size(&cycle);
+ self.report_overflow_error(&cycle[0], false);
}
- // The only way to have a type of infinite size is to have,
- // somewhere, a struct/enum type involved. Identify all such types
- // and report the cycle to the user.
-
- let struct_enum_tys: Vec<_> =
- cycle.iter()
- .flat_map(|obligation| match obligation.predicate {
- ty::Predicate::Trait(ref data) => {
- assert_eq!(data.def_id(), sized_trait);
- let self_ty = data.skip_binder().trait_ref.self_ty(); // (*)
- // (*) ok to skip binder because this is just
- // error reporting and regions don't really
- // matter
- match self_ty.sty {
- ty::TyEnum(..) | ty::TyStruct(..) => Some(self_ty),
- _ => None,
+ /// If a cycle results from evaluated whether something is Sized, that
+ /// is a particular special case that always results from a struct or
+ /// enum definition that lacks indirection (e.g., `struct Foo { x: Foo
+ /// }`). We wish to report a targeted error for this case.
+ pub fn try_report_overflow_error_type_of_infinite_size(&self,
+ cycle: &[PredicateObligation<'tcx>])
+ {
+ let sized_trait = match self.tcx.lang_items.sized_trait() {
+ Some(v) => v,
+ None => return,
+ };
+ let top_is_sized = {
+ match cycle[0].predicate {
+ ty::Predicate::Trait(ref data) => data.def_id() == sized_trait,
+ _ => false,
+ }
+ };
+ if !top_is_sized {
+ return;
+ }
+
+ // The only way to have a type of infinite size is to have,
+ // somewhere, a struct/enum type involved. Identify all such types
+ // and report the cycle to the user.
+
+ let struct_enum_tys: Vec<_> =
+ cycle.iter()
+ .flat_map(|obligation| match obligation.predicate {
+ ty::Predicate::Trait(ref data) => {
+ assert_eq!(data.def_id(), sized_trait);
+ let self_ty = data.skip_binder().trait_ref.self_ty(); // (*)
+ // (*) ok to skip binder because this is just
+ // error reporting and regions don't really
+ // matter
+ match self_ty.sty {
+ ty::TyEnum(..) | ty::TyStruct(..) => Some(self_ty),
+ _ => None,
+ }
}
- }
- _ => {
- span_bug!(obligation.cause.span,
- "Sized cycle involving non-trait-ref: {:?}",
- obligation.predicate);
- }
- })
- .collect();
-
- assert!(!struct_enum_tys.is_empty());
-
- // This is a bit tricky. We want to pick a "main type" in the
- // listing that is local to the current crate, so we can give a
- // good span to the user. But it might not be the first one in our
- // cycle list. So find the first one that is local and then
- // rotate.
- let (main_index, main_def_id) =
- struct_enum_tys.iter()
- .enumerate()
- .filter_map(|(index, ty)| match ty.sty {
- ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _)
- if adt_def.did.is_local() =>
- Some((index, adt_def.did)),
- _ =>
- None,
- })
- .next()
- .unwrap(); // should always be SOME local type involved!
-
- // Rotate so that the "main" type is at index 0.
- let struct_enum_tys: Vec<_> =
- struct_enum_tys.iter()
- .cloned()
- .skip(main_index)
- .chain(struct_enum_tys.iter().cloned().take(main_index))
- .collect();
-
- let tcx = infcx.tcx;
- let mut err = recursive_type_with_infinite_size_error(tcx, main_def_id);
- let len = struct_enum_tys.len();
- if len > 2 {
- err.note(&format!("type `{}` is embedded within `{}`...",
- struct_enum_tys[0],
- struct_enum_tys[1]));
- for &next_ty in &struct_enum_tys[1..len-1] {
- err.note(&format!("...which in turn is embedded within `{}`...", next_ty));
+ _ => {
+ span_bug!(obligation.cause.span,
+ "Sized cycle involving non-trait-ref: {:?}",
+ obligation.predicate);
+ }
+ })
+ .collect();
+
+ assert!(!struct_enum_tys.is_empty());
+
+ // This is a bit tricky. We want to pick a "main type" in the
+ // listing that is local to the current crate, so we can give a
+ // good span to the user. But it might not be the first one in our
+ // cycle list. So find the first one that is local and then
+ // rotate.
+ let (main_index, main_def_id) =
+ struct_enum_tys.iter()
+ .enumerate()
+ .filter_map(|(index, ty)| match ty.sty {
+ ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _)
+ if adt_def.did.is_local() =>
+ Some((index, adt_def.did)),
+ _ =>
+ None,
+ })
+ .next()
+ .unwrap(); // should always be SOME local type involved!
+
+ // Rotate so that the "main" type is at index 0.
+ let struct_enum_tys: Vec<_> =
+ struct_enum_tys.iter()
+ .cloned()
+ .skip(main_index)
+ .chain(struct_enum_tys.iter().cloned().take(main_index))
+ .collect();
+
+ let tcx = self.tcx;
+ let mut err = tcx.recursive_type_with_infinite_size_error(main_def_id);
+ let len = struct_enum_tys.len();
+ if len > 2 {
+ err.note(&format!("type `{}` is embedded within `{}`...",
+ struct_enum_tys[0],
+ struct_enum_tys[1]));
+ for &next_ty in &struct_enum_tys[1..len-1] {
+ err.note(&format!("...which in turn is embedded within `{}`...", next_ty));
+ }
+ err.note(&format!("...which in turn is embedded within `{}`, \
+ completing the cycle.",
+ struct_enum_tys[len-1]));
}
- err.note(&format!("...which in turn is embedded within `{}`, \
- completing the cycle.",
- struct_enum_tys[len-1]));
+ err.emit();
+ self.tcx.sess.abort_if_errors();
+ bug!();
}
- err.emit();
- infcx.tcx.sess.abort_if_errors();
- bug!();
-}
-pub fn recursive_type_with_infinite_size_error<'tcx>(tcx: &TyCtxt<'tcx>,
- type_def_id: DefId)
- -> DiagnosticBuilder<'tcx>
-{
- assert!(type_def_id.is_local());
- let span = tcx.map.span_if_local(type_def_id).unwrap();
- let mut err = struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size",
- tcx.item_path_str(type_def_id));
- err.help(&format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \
- at some point to make `{}` representable",
- tcx.item_path_str(type_def_id)));
- err
-}
+ pub fn report_selection_error(&self,
+ obligation: &PredicateObligation<'tcx>,
+ error: &SelectionError<'tcx>,
+ warning_node_id: Option<ast::NodeId>)
+ {
+ let span = obligation.cause.span;
+ let mut err = match *error {
+ SelectionError::Unimplemented => {
+ if let ObligationCauseCode::CompareImplMethodObligation = obligation.cause.code {
+ span_err!(
+ self.tcx.sess, span, E0276,
+ "the requirement `{}` appears on the impl \
+ method but not on the corresponding trait method",
+ obligation.predicate);
+ return;
+ } else {
+ match obligation.predicate {
+ ty::Predicate::Trait(ref trait_predicate) => {
+ let trait_predicate =
+ self.resolve_type_vars_if_possible(trait_predicate);
-pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>,
- error: &SelectionError<'tcx>,
- warning_node_id: Option<ast::NodeId>)
-{
- match *error {
- SelectionError::Unimplemented => {
- if let ObligationCauseCode::CompareImplMethodObligation = obligation.cause.code {
- span_err!(
- infcx.tcx.sess, obligation.cause.span, E0276,
- "the requirement `{}` appears on the impl \
- method but not on the corresponding trait method",
- obligation.predicate);
- } else {
- match obligation.predicate {
- ty::Predicate::Trait(ref trait_predicate) => {
- let trait_predicate =
- infcx.resolve_type_vars_if_possible(trait_predicate);
-
- if !infcx.tcx.sess.has_errors() || !trait_predicate.references_error() {
- let trait_ref = trait_predicate.to_poly_trait_ref();
-
- if let Some(warning_node_id) = warning_node_id {
- infcx.tcx.sess.add_lint(
- ::lint::builtin::UNSIZED_IN_TUPLE,
- warning_node_id,
- obligation.cause.span,
- format!("the trait bound `{}` is not satisfied",
- trait_ref.to_predicate()));
+ if self.tcx.sess.has_errors() && trait_predicate.references_error() {
return;
- }
-
- let mut err = struct_span_err!(
- infcx.tcx.sess, obligation.cause.span, E0277,
- "the trait bound `{}` is not satisfied",
- trait_ref.to_predicate());
-
- // Try to report a help message
-
- if !trait_ref.has_infer_types() &&
- predicate_can_apply(infcx, trait_ref)
- {
- // If a where-clause may be useful, remind the
- // user that they can add it.
- //
- // don't display an on-unimplemented note, as
- // these notes will often be of the form
- // "the type `T` can't be frobnicated"
- // which is somewhat confusing.
- err.help(&format!("consider adding a `where {}` bound",
- trait_ref.to_predicate()
- ));
- } else if let Some(s) = on_unimplemented_note(infcx, trait_ref,
- obligation.cause.span) {
- // Otherwise, if there is an on-unimplemented note,
- // display it.
- err.note(&s);
} else {
- // If we can't show anything useful, try to find
- // similar impls.
+ let trait_ref = trait_predicate.to_poly_trait_ref();
+
+ if let Some(warning_node_id) = warning_node_id {
+ self.tcx.sess.add_lint(
+ ::lint::builtin::UNSIZED_IN_TUPLE,
+ warning_node_id,
+ obligation.cause.span,
+ format!("the trait bound `{}` is not satisfied",
+ trait_ref.to_predicate()));
+ return;
+ }
- let impl_candidates =
- find_similar_impl_candidates(infcx, trait_ref);
- if impl_candidates.len() > 0 {
- report_similar_impl_candidates(&mut err, &impl_candidates);
+ let mut err = struct_span_err!(
+ self.tcx.sess, span, E0277,
+ "the trait bound `{}` is not satisfied",
+ trait_ref.to_predicate());
+
+ // Try to report a help message
+
+ if !trait_ref.has_infer_types() &&
+ self.predicate_can_apply(trait_ref)
+ {
+ // If a where-clause may be useful, remind the
+ // user that they can add it.
+ //
+ // don't display an on-unimplemented note, as
+ // these notes will often be of the form
+ // "the type `T` can't be frobnicated"
+ // which is somewhat confusing.
+ err.help(&format!("consider adding a `where {}` bound",
+ trait_ref.to_predicate()
+ ));
+ } else if let Some(s) =
+ self.on_unimplemented_note(trait_ref, span) {
+ // Otherwise, if there is an on-unimplemented note,
+ // display it.
+ err.note(&s);
+ } else {
+ // If we can't show anything useful, try to find
+ // similar impls.
+
+ self.report_similar_impl_candidates(trait_ref, &mut err);
}
+ err
}
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ },
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = self.resolve_type_vars_if_possible(predicate);
+ let err = self.equality_predicate(span,
+ &predicate).err().unwrap();
+ struct_span_err!(self.tcx.sess, span, E0278,
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate, err)
}
- },
- ty::Predicate::Equate(ref predicate) => {
- let predicate = infcx.resolve_type_vars_if_possible(predicate);
- let err = infcx.equality_predicate(obligation.cause.span,
- &predicate).err().unwrap();
- let mut err = struct_span_err!(
- infcx.tcx.sess, obligation.cause.span, E0278,
- "the requirement `{}` is not satisfied (`{}`)",
- predicate,
- err);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
- }
- ty::Predicate::RegionOutlives(ref predicate) => {
- let predicate = infcx.resolve_type_vars_if_possible(predicate);
- let err = infcx.region_outlives_predicate(obligation.cause.span,
- &predicate).err().unwrap();
- let mut err = struct_span_err!(
- infcx.tcx.sess, obligation.cause.span, E0279,
- "the requirement `{}` is not satisfied (`{}`)",
- predicate,
- err);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
- }
+ ty::Predicate::RegionOutlives(ref predicate) => {
+ let predicate = self.resolve_type_vars_if_possible(predicate);
+ let err = self.region_outlives_predicate(span,
+ &predicate).err().unwrap();
+ struct_span_err!(self.tcx.sess, span, E0279,
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate, err)
+ }
- ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => {
- let predicate =
- infcx.resolve_type_vars_if_possible(&obligation.predicate);
- let mut err = struct_span_err!(
- infcx.tcx.sess, obligation.cause.span, E0280,
- "the requirement `{}` is not satisfied",
- predicate);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
- }
+ ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => {
+ let predicate =
+ self.resolve_type_vars_if_possible(&obligation.predicate);
+ struct_span_err!(self.tcx.sess, span, E0280,
+ "the requirement `{}` is not satisfied",
+ predicate)
+ }
- ty::Predicate::ObjectSafe(trait_def_id) => {
- let violations = object_safety_violations(
- infcx.tcx, trait_def_id);
- let err = report_object_safety_error(infcx.tcx,
- obligation.cause.span,
- trait_def_id,
- warning_node_id,
- violations);
- if let Some(mut err) = err {
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ ty::Predicate::ObjectSafe(trait_def_id) => {
+ let violations = self.tcx.object_safety_violations(trait_def_id);
+ let err = self.tcx.report_object_safety_error(span,
+ trait_def_id,
+ warning_node_id,
+ violations);
+ if let Some(err) = err {
+ err
+ } else {
+ return;
+ }
}
- }
- ty::Predicate::ClosureKind(closure_def_id, kind) => {
- let found_kind = infcx.closure_kind(closure_def_id).unwrap();
- let closure_span = infcx.tcx.map.span_if_local(closure_def_id).unwrap();
- let mut err = struct_span_err!(
- infcx.tcx.sess, closure_span, E0525,
- "expected a closure that implements the `{}` trait, but this closure \
- only implements `{}`",
- kind,
- found_kind);
- err.span_note(
- obligation.cause.span,
- &format!("the requirement to implement `{}` derives from here", kind));
- err.emit();
- }
+ ty::Predicate::ClosureKind(closure_def_id, kind) => {
+ let found_kind = self.closure_kind(closure_def_id).unwrap();
+ let closure_span = self.tcx.map.span_if_local(closure_def_id).unwrap();
+ let mut err = struct_span_err!(
+ self.tcx.sess, closure_span, E0525,
+ "expected a closure that implements the `{}` trait, \
+ but this closure only implements `{}`",
+ kind,
+ found_kind);
+ err.span_note(
+ obligation.cause.span,
+ &format!("the requirement to implement \
+ `{}` derives from here", kind));
+ err.emit();
+ return;
+ }
- ty::Predicate::WellFormed(ty) => {
- // WF predicates cannot themselves make
- // errors. They can only block due to
- // ambiguity; otherwise, they always
- // degenerate into other obligations
- // (which may fail).
- span_bug!(
- obligation.cause.span,
- "WF predicate not satisfied for {:?}",
- ty);
- }
+ ty::Predicate::WellFormed(ty) => {
+ // WF predicates cannot themselves make
+ // errors. They can only block due to
+ // ambiguity; otherwise, they always
+ // degenerate into other obligations
+ // (which may fail).
+ span_bug!(span, "WF predicate not satisfied for {:?}", ty);
+ }
- ty::Predicate::Rfc1592(ref data) => {
- span_bug!(
- obligation.cause.span,
- "RFC1592 predicate not satisfied for {:?}",
- data);
+ ty::Predicate::Rfc1592(ref data) => {
+ span_bug!(
+ obligation.cause.span,
+ "RFC1592 predicate not satisfied for {:?}",
+ data);
+ }
}
}
}
- }
- OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
- let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref);
- let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref);
- if !actual_trait_ref.self_ty().references_error() {
- let mut err = struct_span_err!(
- infcx.tcx.sess, obligation.cause.span, E0281,
+ OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
+ let expected_trait_ref = self.resolve_type_vars_if_possible(&*expected_trait_ref);
+ let actual_trait_ref = self.resolve_type_vars_if_possible(&*actual_trait_ref);
+ if actual_trait_ref.self_ty().references_error() {
+ return;
+ }
+ struct_span_err!(self.tcx.sess, span, E0281,
"type mismatch: the type `{}` implements the trait `{}`, \
but the trait `{}` is required ({})",
expected_trait_ref.self_ty(),
expected_trait_ref,
actual_trait_ref,
- e);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ e)
}
- }
- TraitNotObjectSafe(did) => {
- let violations = object_safety_violations(infcx.tcx, did);
- let err = report_object_safety_error(infcx.tcx, obligation.cause.span, did,
- warning_node_id,
- violations);
- if let Some(mut err) = err {
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ TraitNotObjectSafe(did) => {
+ let violations = self.tcx.object_safety_violations(did);
+ let err = self.tcx.report_object_safety_error(span, did,
+ warning_node_id,
+ violations);
+ if let Some(err) = err {
+ err
+ } else {
+ return;
+ }
}
- }
+ };
+ self.note_obligation_cause(&mut err, obligation);
+ err.emit();
}
}
-pub fn report_object_safety_error<'tcx>(tcx: &TyCtxt<'tcx>,
- span: Span,
- trait_def_id: DefId,
- warning_node_id: Option<ast::NodeId>,
- violations: Vec<ObjectSafetyViolation>)
- -> Option<DiagnosticBuilder<'tcx>>
-{
- let mut err = match warning_node_id {
- Some(_) => None,
- None => {
- Some(struct_span_err!(
- tcx.sess, span, E0038,
- "the trait `{}` cannot be made into an object",
- tcx.item_path_str(trait_def_id)))
- }
- };
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn recursive_type_with_infinite_size_error(self,
+ type_def_id: DefId)
+ -> DiagnosticBuilder<'tcx>
+ {
+ assert!(type_def_id.is_local());
+ let span = self.map.span_if_local(type_def_id).unwrap();
+ let mut err = struct_span_err!(self.sess, span, E0072,
+ "recursive type `{}` has infinite size",
+ self.item_path_str(type_def_id));
+ err.help(&format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \
+ at some point to make `{}` representable",
+ self.item_path_str(type_def_id)));
+ err
+ }
- let mut reported_violations = FnvHashSet();
- for violation in violations {
- if !reported_violations.insert(violation.clone()) {
- continue;
- }
- let buf;
- let note = match violation {
- ObjectSafetyViolation::SizedSelf => {
- "the trait cannot require that `Self : Sized`"
+ pub fn report_object_safety_error(self,
+ span: Span,
+ trait_def_id: DefId,
+ warning_node_id: Option<ast::NodeId>,
+ violations: Vec<ObjectSafetyViolation>)
+ -> Option<DiagnosticBuilder<'tcx>>
+ {
+ let mut err = match warning_node_id {
+ Some(_) => None,
+ None => {
+ Some(struct_span_err!(
+ self.sess, span, E0038,
+ "the trait `{}` cannot be made into an object",
+ self.item_path_str(trait_def_id)))
}
+ };
- ObjectSafetyViolation::SupertraitSelf => {
- "the trait cannot use `Self` as a type parameter \
- in the supertrait listing"
+ let mut reported_violations = FnvHashSet();
+ for violation in violations {
+ if !reported_violations.insert(violation.clone()) {
+ continue;
}
+ let buf;
+ let note = match violation {
+ ObjectSafetyViolation::SizedSelf => {
+ "the trait cannot require that `Self : Sized`"
+ }
- ObjectSafetyViolation::Method(method,
- MethodViolationCode::StaticMethod) => {
- buf = format!("method `{}` has no receiver",
- method.name);
- &buf
- }
+ ObjectSafetyViolation::SupertraitSelf => {
+ "the trait cannot use `Self` as a type parameter \
+ in the supertrait listing"
+ }
- ObjectSafetyViolation::Method(method,
- MethodViolationCode::ReferencesSelf) => {
- buf = format!("method `{}` references the `Self` type \
- in its arguments or return type",
- method.name);
- &buf
- }
+ ObjectSafetyViolation::Method(method,
+ MethodViolationCode::StaticMethod) => {
+ buf = format!("method `{}` has no receiver",
+ method.name);
+ &buf
+ }
- ObjectSafetyViolation::Method(method,
- MethodViolationCode::Generic) => {
- buf = format!("method `{}` has generic type parameters",
- method.name);
- &buf
+ ObjectSafetyViolation::Method(method,
+ MethodViolationCode::ReferencesSelf) => {
+ buf = format!("method `{}` references the `Self` type \
+ in its arguments or return type",
+ method.name);
+ &buf
+ }
+
+ ObjectSafetyViolation::Method(method,
+ MethodViolationCode::Generic) => {
+ buf = format!("method `{}` has generic type parameters",
+ method.name);
+ &buf
+ }
+ };
+ match (warning_node_id, &mut err) {
+ (Some(node_id), &mut None) => {
+ self.sess.add_lint(
+ ::lint::builtin::OBJECT_UNSAFE_FRAGMENT,
+ node_id,
+ span,
+ note.to_string());
+ }
+ (None, &mut Some(ref mut err)) => {
+ err.note(note);
+ }
+ _ => unreachable!()
}
- };
- match (warning_node_id, &mut err) {
- (Some(node_id), &mut None) => {
- tcx.sess.add_lint(
- ::lint::builtin::OBJECT_UNSAFE_FRAGMENT,
- node_id,
- span,
- note.to_string());
- }
- (None, &mut Some(ref mut err)) => {
- err.note(note);
- }
- _ => unreachable!()
}
+ err
}
- err
}
-pub fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>) {
- // Unable to successfully determine, probably means
- // insufficient type information, but could mean
- // ambiguous impls. The latter *ought* to be a
- // coherence violation, so we don't report it here.
-
- let predicate = infcx.resolve_type_vars_if_possible(&obligation.predicate);
-
- debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})",
- predicate,
- obligation);
-
- // Ambiguity errors are often caused as fallout from earlier
- // errors. So just ignore them if this infcx is tainted.
- if infcx.is_tainted_by_errors() {
- return;
- }
-
- match predicate {
- ty::Predicate::Trait(ref data) => {
- let trait_ref = data.to_poly_trait_ref();
- let self_ty = trait_ref.self_ty();
- let all_types = &trait_ref.substs().types;
- if all_types.references_error() {
- } else {
- // Typically, this ambiguity should only happen if
- // there are unresolved type inference variables
- // (otherwise it would suggest a coherence
- // failure). But given #21974 that is not necessarily
- // the case -- we can have multiple where clauses that
- // are only distinguished by a region, which results
- // in an ambiguity even when all types are fully
- // known, since we don't dispatch based on region
- // relationships.
-
- // This is kind of a hack: it frequently happens that some earlier
- // error prevents types from being fully inferred, and then we get
- // a bunch of uninteresting errors saying something like "<generic
- // #0> doesn't implement Sized". It may even be true that we
- // could just skip over all checks where the self-ty is an
- // inference variable, but I was afraid that there might be an
- // inference variable created, registered as an obligation, and
- // then never forced by writeback, and hence by skipping here we'd
- // be ignoring the fact that we don't KNOW the type works
- // out. Though even that would probably be harmless, given that
- // we're only talking about builtin traits, which are known to be
- // inhabited. But in any case I just threw in this check for
- // has_errors() to be sure that compilation isn't happening
- // anyway. In that case, why inundate the user.
- if !infcx.tcx.sess.has_errors() {
- if
- infcx.tcx.lang_items.sized_trait()
- .map_or(false, |sized_id| sized_id == trait_ref.def_id())
- {
- need_type_info(infcx, obligation.cause.span, self_ty);
- } else {
- let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0283,
- "type annotations required: \
- cannot resolve `{}`",
- predicate);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ fn maybe_report_ambiguity(&self, obligation: &PredicateObligation<'tcx>) {
+ // Unable to successfully determine, probably means
+ // insufficient type information, but could mean
+ // ambiguous impls. The latter *ought* to be a
+ // coherence violation, so we don't report it here.
+
+ let predicate = self.resolve_type_vars_if_possible(&obligation.predicate);
+
+ debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})",
+ predicate,
+ obligation);
+
+ // Ambiguity errors are often caused as fallout from earlier
+ // errors. So just ignore them if this infcx is tainted.
+ if self.is_tainted_by_errors() {
+ return;
+ }
+
+ match predicate {
+ ty::Predicate::Trait(ref data) => {
+ let trait_ref = data.to_poly_trait_ref();
+ let self_ty = trait_ref.self_ty();
+ let all_types = &trait_ref.substs().types;
+ if all_types.references_error() {
+ } else {
+ // Typically, this ambiguity should only happen if
+ // there are unresolved type inference variables
+ // (otherwise it would suggest a coherence
+ // failure). But given #21974 that is not necessarily
+ // the case -- we can have multiple where clauses that
+ // are only distinguished by a region, which results
+ // in an ambiguity even when all types are fully
+ // known, since we don't dispatch based on region
+ // relationships.
+
+ // This is kind of a hack: it frequently happens that some earlier
+ // error prevents types from being fully inferred, and then we get
+ // a bunch of uninteresting errors saying something like "<generic
+ // #0> doesn't implement Sized". It may even be true that we
+ // could just skip over all checks where the self-ty is an
+ // inference variable, but I was afraid that there might be an
+ // inference variable created, registered as an obligation, and
+ // then never forced by writeback, and hence by skipping here we'd
+ // be ignoring the fact that we don't KNOW the type works
+ // out. Though even that would probably be harmless, given that
+ // we're only talking about builtin traits, which are known to be
+ // inhabited. But in any case I just threw in this check for
+ // has_errors() to be sure that compilation isn't happening
+ // anyway. In that case, why inundate the user.
+ if !self.tcx.sess.has_errors() {
+ if
+ self.tcx.lang_items.sized_trait()
+ .map_or(false, |sized_id| sized_id == trait_ref.def_id())
+ {
+ self.need_type_info(obligation.cause.span, self_ty);
+ } else {
+ let mut err = struct_span_err!(self.tcx.sess,
+ obligation.cause.span, E0283,
+ "type annotations required: \
+ cannot resolve `{}`",
+ predicate);
+ self.note_obligation_cause(&mut err, obligation);
+ err.emit();
+ }
}
}
}
- }
- ty::Predicate::WellFormed(ty) => {
- // Same hacky approach as above to avoid deluging user
- // with error messages.
- if !ty.references_error() && !infcx.tcx.sess.has_errors() {
- need_type_info(infcx, obligation.cause.span, ty);
+ ty::Predicate::WellFormed(ty) => {
+ // Same hacky approach as above to avoid deluging user
+ // with error messages.
+ if !ty.references_error() && !self.tcx.sess.has_errors() {
+ self.need_type_info(obligation.cause.span, ty);
+ }
}
- }
- _ => {
- if !infcx.tcx.sess.has_errors() {
- let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0284,
- "type annotations required: cannot resolve `{}`",
- predicate);
- note_obligation_cause(infcx, &mut err, obligation);
- err.emit();
+ _ => {
+ if !self.tcx.sess.has_errors() {
+ let mut err = struct_span_err!(self.tcx.sess,
+ obligation.cause.span, E0284,
+ "type annotations required: \
+ cannot resolve `{}`",
+ predicate);
+ self.note_obligation_cause(&mut err, obligation);
+ err.emit();
+ }
}
}
}
-}
-/// Returns whether the trait predicate may apply for *some* assignment
-/// to the type parameters.
-fn predicate_can_apply<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- pred: ty::PolyTraitRef<'tcx>)
- -> bool
-{
- struct ParamToVarFolder<'a, 'tcx: 'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
- var_map: FnvHashMap<Ty<'tcx>, Ty<'tcx>>
- }
+ /// Returns whether the trait predicate may apply for *some* assignment
+ /// to the type parameters.
+ fn predicate_can_apply(&self, pred: ty::PolyTraitRef<'tcx>) -> bool {
+ struct ParamToVarFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ var_map: FnvHashMap<Ty<'tcx>, Ty<'tcx>>
+ }
- impl<'a, 'tcx> TypeFolder<'tcx> for ParamToVarFolder<'a, 'tcx>
- {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.infcx.tcx }
+ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for ParamToVarFolder<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx }
- fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- if let ty::TyParam(..) = ty.sty {
- let infcx = self.infcx;
- self.var_map.entry(ty).or_insert_with(|| infcx.next_ty_var())
- } else {
- ty.super_fold_with(self)
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::TyParam(..) = ty.sty {
+ let infcx = self.infcx;
+ self.var_map.entry(ty).or_insert_with(|| infcx.next_ty_var())
+ } else {
+ ty.super_fold_with(self)
+ }
}
}
- }
- infcx.probe(|_| {
- let mut selcx = SelectionContext::new(infcx);
+ self.probe(|_| {
+ let mut selcx = SelectionContext::new(self);
- let cleaned_pred = pred.fold_with(&mut ParamToVarFolder {
- infcx: infcx,
- var_map: FnvHashMap()
- });
+ let cleaned_pred = pred.fold_with(&mut ParamToVarFolder {
+ infcx: self,
+ var_map: FnvHashMap()
+ });
- let cleaned_pred = super::project::normalize(
- &mut selcx,
- ObligationCause::dummy(),
- &cleaned_pred
- ).value;
+ let cleaned_pred = super::project::normalize(
+ &mut selcx,
+ ObligationCause::dummy(),
+ &cleaned_pred
+ ).value;
- let obligation = Obligation::new(
- ObligationCause::dummy(),
- cleaned_pred.to_predicate()
- );
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ cleaned_pred.to_predicate()
+ );
- selcx.evaluate_obligation(&obligation)
- })
-}
+ selcx.evaluate_obligation(&obligation)
+ })
+ }
-fn need_type_info<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- span: Span,
- ty: Ty<'tcx>)
-{
- span_err!(infcx.tcx.sess, span, E0282,
- "unable to infer enough type information about `{}`; \
- type annotations or generic parameter binding required",
- ty);
-}
+ fn need_type_info(&self, span: Span, ty: Ty<'tcx>) {
+ span_err!(self.tcx.sess, span, E0282,
+ "unable to infer enough type information about `{}`; \
+ type annotations or generic parameter binding required",
+ ty);
+ }
-fn note_obligation_cause<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
- err: &mut DiagnosticBuilder,
- obligation: &Obligation<'tcx, T>)
- where T: fmt::Display
-{
- note_obligation_cause_code(infcx,
- err,
- &obligation.predicate,
- &obligation.cause.code);
-}
+ fn note_obligation_cause<T>(&self,
+ err: &mut DiagnosticBuilder,
+ obligation: &Obligation<'tcx, T>)
+ where T: fmt::Display
+ {
+ self.note_obligation_cause_code(err,
+ &obligation.predicate,
+ &obligation.cause.code);
+ }
-fn note_obligation_cause_code<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>,
- err: &mut DiagnosticBuilder,
- predicate: &T,
- cause_code: &ObligationCauseCode<'tcx>)
- where T: fmt::Display
-{
- let tcx = infcx.tcx;
- match *cause_code {
- ObligationCauseCode::MiscObligation => { }
- ObligationCauseCode::SliceOrArrayElem => {
- err.note("slice and array elements must have `Sized` type");
- }
- ObligationCauseCode::TupleElem => {
- err.note("tuple elements must have `Sized` type");
- }
- ObligationCauseCode::ProjectionWf(data) => {
- err.note(&format!("required so that the projection `{}` is well-formed",
- data));
- }
- ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => {
- err.note(&format!("required so that reference `{}` does not outlive its referent",
- ref_ty));
- }
- ObligationCauseCode::ItemObligation(item_def_id) => {
- let item_name = tcx.item_path_str(item_def_id);
- err.note(&format!("required by `{}`", item_name));
- }
- ObligationCauseCode::ObjectCastObligation(object_ty) => {
- err.note(&format!("required for the cast to the object type `{}`",
- infcx.ty_to_string(object_ty)));
- }
- ObligationCauseCode::RepeatVec => {
- err.note("the `Copy` trait is required because the \
- repeated element will be copied");
- }
- ObligationCauseCode::VariableType(_) => {
- err.note("all local variables must have a statically known size");
- }
- ObligationCauseCode::ReturnType => {
- err.note("the return type of a function must have a \
- statically known size");
- }
- ObligationCauseCode::AssignmentLhsSized => {
- err.note("the left-hand-side of an assignment must have a statically known size");
- }
- ObligationCauseCode::StructInitializerSized => {
- err.note("structs must have a statically known size to be initialized");
- }
- ObligationCauseCode::ClosureCapture(var_id, _, builtin_bound) => {
- let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
- let trait_name = tcx.item_path_str(def_id);
- let name = tcx.local_var_name_str(var_id);
- err.note(
- &format!("the closure that captures `{}` requires that all captured variables \
- implement the trait `{}`",
- name,
- trait_name));
- }
- ObligationCauseCode::FieldSized => {
- err.note("only the last field of a struct or enum variant \
- may have a dynamically sized type");
- }
- ObligationCauseCode::SharedStatic => {
- err.note("shared static variables must have a type that implements `Sync`");
- }
- ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
- let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
- err.note(&format!("required because it appears within the type `{}`",
- parent_trait_ref.0.self_ty()));
- let parent_predicate = parent_trait_ref.to_predicate();
- note_obligation_cause_code(infcx,
- err,
- &parent_predicate,
- &data.parent_code);
- }
- ObligationCauseCode::ImplDerivedObligation(ref data) => {
- let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref);
- err.note(
- &format!("required because of the requirements on the impl of `{}` for `{}`",
- parent_trait_ref,
- parent_trait_ref.0.self_ty()));
- let parent_predicate = parent_trait_ref.to_predicate();
- note_obligation_cause_code(infcx,
- err,
- &parent_predicate,
- &data.parent_code);
- }
- ObligationCauseCode::CompareImplMethodObligation => {
- err.note(
- &format!("the requirement `{}` appears on the impl method \
- but not on the corresponding trait method",
- predicate));
+ fn note_obligation_cause_code<T>(&self,
+ err: &mut DiagnosticBuilder,
+ predicate: &T,
+ cause_code: &ObligationCauseCode<'tcx>)
+ where T: fmt::Display
+ {
+ let tcx = self.tcx;
+ match *cause_code {
+ ObligationCauseCode::MiscObligation => { }
+ ObligationCauseCode::SliceOrArrayElem => {
+ err.note("slice and array elements must have `Sized` type");
+ }
+ ObligationCauseCode::TupleElem => {
+ err.note("tuple elements must have `Sized` type");
+ }
+ ObligationCauseCode::ProjectionWf(data) => {
+ err.note(&format!("required so that the projection `{}` is well-formed",
+ data));
+ }
+ ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => {
+ err.note(&format!("required so that reference `{}` does not outlive its referent",
+ ref_ty));
+ }
+ ObligationCauseCode::ItemObligation(item_def_id) => {
+ let item_name = tcx.item_path_str(item_def_id);
+ err.note(&format!("required by `{}`", item_name));
+ }
+ ObligationCauseCode::ObjectCastObligation(object_ty) => {
+ err.note(&format!("required for the cast to the object type `{}`",
+ self.ty_to_string(object_ty)));
+ }
+ ObligationCauseCode::RepeatVec => {
+ err.note("the `Copy` trait is required because the \
+ repeated element will be copied");
+ }
+ ObligationCauseCode::VariableType(_) => {
+ err.note("all local variables must have a statically known size");
+ }
+ ObligationCauseCode::ReturnType => {
+ err.note("the return type of a function must have a \
+ statically known size");
+ }
+ ObligationCauseCode::AssignmentLhsSized => {
+ err.note("the left-hand-side of an assignment must have a statically known size");
+ }
+ ObligationCauseCode::StructInitializerSized => {
+ err.note("structs must have a statically known size to be initialized");
+ }
+ ObligationCauseCode::ClosureCapture(var_id, _, builtin_bound) => {
+ let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
+ let trait_name = tcx.item_path_str(def_id);
+ let name = tcx.local_var_name_str(var_id);
+ err.note(
+ &format!("the closure that captures `{}` requires that all captured variables \
+ implement the trait `{}`",
+ name,
+ trait_name));
+ }
+ ObligationCauseCode::FieldSized => {
+ err.note("only the last field of a struct or enum variant \
+ may have a dynamically sized type");
+ }
+ ObligationCauseCode::SharedStatic => {
+ err.note("shared static variables must have a type that implements `Sync`");
+ }
+ ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
+ let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref);
+ err.note(&format!("required because it appears within the type `{}`",
+ parent_trait_ref.0.self_ty()));
+ let parent_predicate = parent_trait_ref.to_predicate();
+ self.note_obligation_cause_code(err,
+ &parent_predicate,
+ &data.parent_code);
+ }
+ ObligationCauseCode::ImplDerivedObligation(ref data) => {
+ let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref);
+ err.note(
+ &format!("required because of the requirements on the impl of `{}` for `{}`",
+ parent_trait_ref,
+ parent_trait_ref.0.self_ty()));
+ let parent_predicate = parent_trait_ref.to_predicate();
+ self.note_obligation_cause_code(err,
+ &parent_predicate,
+ &data.parent_code);
+ }
+ ObligationCauseCode::CompareImplMethodObligation => {
+ err.note(
+ &format!("the requirement `{}` appears on the impl method \
+ but not on the corresponding trait method",
+ predicate));
+ }
}
}
-}
-fn suggest_new_overflow_limit(tcx: &TyCtxt, err:&mut DiagnosticBuilder) {
- let current_limit = tcx.sess.recursion_limit.get();
- let suggested_limit = current_limit * 2;
- err.note(&format!(
- "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
- suggested_limit));
+ fn suggest_new_overflow_limit(&self, err: &mut DiagnosticBuilder) {
+ let current_limit = self.tcx.sess.recursion_limit.get();
+ let suggested_limit = current_limit * 2;
+ err.note(&format!(
+ "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
+ suggested_limit));
+ }
}
use super::CodeAmbiguity;
use super::CodeProjectionError;
use super::CodeSelectionError;
-use super::is_object_safe;
use super::FulfillmentError;
use super::FulfillmentErrorCode;
use super::ObligationCause;
use super::PredicateObligation;
use super::project;
-use super::report_overflow_error_cycle;
use super::select::SelectionContext;
use super::Unimplemented;
-use super::util::predicate_for_builtin_bound;
pub struct GlobalFulfilledPredicates<'tcx> {
set: FnvHashSet<ty::PolyTraitPredicate<'tcx>>,
pub stalled_on: Vec<Ty<'tcx>>,
}
-impl<'tcx> FulfillmentContext<'tcx> {
+impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> {
/// Creates a new fulfillment context.
pub fn new() -> FulfillmentContext<'tcx> {
FulfillmentContext {
/// `SomeTrait` or a where clause that lets us unify `$0` with
/// something concrete. If this fails, we'll unify `$0` with
/// `projection_ty` again.
- pub fn normalize_projection_type<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- projection_ty: ty::ProjectionTy<'tcx>,
- cause: ObligationCause<'tcx>)
- -> Ty<'tcx>
+ pub fn normalize_projection_type(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>)
+ -> Ty<'tcx>
{
debug!("normalize_projection_type(projection_ty={:?})",
projection_ty);
normalized.value
}
- pub fn register_builtin_bound<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- ty: Ty<'tcx>,
- builtin_bound: ty::BuiltinBound,
- cause: ObligationCause<'tcx>)
+ pub fn register_builtin_bound(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'tcx>,
+ builtin_bound: ty::BuiltinBound,
+ cause: ObligationCause<'tcx>)
{
- match predicate_for_builtin_bound(infcx.tcx, cause, builtin_bound, 0, ty) {
+ match infcx.tcx.predicate_for_builtin_bound(cause, builtin_bound, 0, ty) {
Ok(predicate) => {
self.register_predicate_obligation(infcx, predicate);
}
}
}
- pub fn register_region_obligation<'a>(&mut self,
- t_a: Ty<'tcx>,
- r_b: ty::Region,
- cause: ObligationCause<'tcx>)
+ pub fn register_region_obligation(&mut self,
+ t_a: Ty<'tcx>,
+ r_b: ty::Region,
+ cause: ObligationCause<'tcx>)
{
register_region_obligation(t_a, r_b, cause, &mut self.region_obligations);
}
- pub fn register_predicate_obligation<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>,
- obligation: PredicateObligation<'tcx>)
+ pub fn register_predicate_obligation(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ obligation: PredicateObligation<'tcx>)
{
// this helps to reduce duplicate errors, as well as making
// debug output much nicer to read and so on.
self.predicates.push_tree(obligation, LocalFulfilledPredicates::new());
}
- pub fn register_rfc1592_obligation<'a>(&mut self,
- _infcx: &InferCtxt<'a,'tcx>,
- obligation: PredicateObligation<'tcx>)
+ pub fn register_rfc1592_obligation(&mut self,
+ _infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ obligation: PredicateObligation<'tcx>)
{
self.rfc1592_obligations.push(obligation);
}
}
}
- pub fn select_rfc1592_obligations<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>)
+ pub fn select_rfc1592_obligations(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
while !self.rfc1592_obligations.is_empty() {
Ok(())
}
- pub fn select_all_or_error<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>)
- -> Result<(),Vec<FulfillmentError<'tcx>>>
+
+ pub fn select_all_or_error(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> Result<(),Vec<FulfillmentError<'tcx>>>
{
self.select_where_possible(infcx)?;
}
}
- pub fn select_where_possible<'a>(&mut self,
- infcx: &InferCtxt<'a,'tcx>)
- -> Result<(),Vec<FulfillmentError<'tcx>>>
+ pub fn select_where_possible(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx);
self.select(&mut selcx)
self.predicates.pending_obligations()
}
- fn is_duplicate_or_add(&mut self,
- tcx: &TyCtxt<'tcx>,
+ fn is_duplicate_or_add(&mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
predicate: &ty::Predicate<'tcx>)
-> bool {
// For "global" predicates -- that is, predicates that don't
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
- fn select<'a>(&mut self,
- selcx: &mut SelectionContext<'a, 'tcx>)
- -> Result<(),Vec<FulfillmentError<'tcx>>>
- {
+ fn select(&mut self, selcx: &mut SelectionContext<'a, 'gcx, 'tcx>)
+ -> Result<(),Vec<FulfillmentError<'tcx>>> {
debug!("select(obligation-forest-size={})", self.predicates.len());
let mut errors = Vec::new();
// these are obligations that were proven to be true.
for pending_obligation in outcome.completed {
let predicate = &pending_obligation.obligation.predicate;
- selcx.tcx().fulfilled_predicates.borrow_mut().add_if_global(predicate);
+ selcx.tcx().fulfilled_predicates.borrow_mut()
+ .add_if_global(selcx.tcx(), predicate);
}
errors.extend(
}
/// Like `process_predicate1`, but wrap result into a pending predicate.
-fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
- tree_cache: &mut LocalFulfilledPredicates<'tcx>,
- pending_obligation: &mut PendingPredicateObligation<'tcx>,
- backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
- region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
- rfc1592_obligations: &mut Vec<PredicateObligation<'tcx>>)
- -> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
- FulfillmentErrorCode<'tcx>>
+fn process_predicate<'a, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ tree_cache: &mut LocalFulfilledPredicates<'tcx>,
+ pending_obligation: &mut PendingPredicateObligation<'tcx>,
+ backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
+ region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
+ rfc1592_obligations: &mut Vec<PredicateObligation<'tcx>>)
+ -> Result<Option<Vec<PendingPredicateObligation<'tcx>>>,
+ FulfillmentErrorCode<'tcx>>
{
match process_predicate1(selcx, pending_obligation, region_obligations,
rfc1592_obligations) {
}
}
-fn process_child_obligations<'a,'tcx>(
- selcx: &mut SelectionContext<'a,'tcx>,
+fn process_child_obligations<'a, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
tree_cache: &mut LocalFulfilledPredicates<'tcx>,
pending_obligation: &PredicateObligation<'tcx>,
backtrace: Backtrace<PendingPredicateObligation<'tcx>>,
debug!("process_child_obligations: coinductive match");
None
} else {
- report_overflow_error_cycle(selcx.infcx(), &cycle);
+ selcx.infcx().report_overflow_error_cycle(&cycle);
}
} else {
// Not a cycle. Just ignore this obligation then,
backtrace: Backtrace<'b, PendingPredicateObligation<'tcx>>,
}
-impl<'b, 'tcx> AncestorSet<'b, 'tcx> {
+impl<'a, 'b, 'gcx, 'tcx> AncestorSet<'b, 'tcx> {
fn new(backtrace: &Backtrace<'b, PendingPredicateObligation<'tcx>>) -> Self {
AncestorSet {
populated: false,
/// to `predicate` (`predicate` is assumed to be fully
/// type-resolved). Returns `None` if not; otherwise, returns
/// `Some` with the index within the backtrace.
- fn has<'a>(&mut self,
- infcx: &InferCtxt<'a, 'tcx>,
- predicate: &ty::Predicate<'tcx>)
- -> Option<usize> {
+ fn has(&mut self,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ predicate: &ty::Predicate<'tcx>)
+ -> Option<usize> {
// the first time, we have to populate the cache
if !self.populated {
let backtrace = self.backtrace.clone();
}
/// Return the set of type variables contained in a trait ref
-fn trait_ref_type_vars<'a, 'tcx>(selcx: &mut SelectionContext<'a, 'tcx>,
- t: ty::PolyTraitRef<'tcx>) -> Vec<Ty<'tcx>>
+fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ t: ty::PolyTraitRef<'tcx>) -> Vec<Ty<'tcx>>
{
t.skip_binder() // ok b/c this check doesn't care about regions
.input_types()
/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true
/// - `Ok(None)` if we don't have enough info to be sure
/// - `Err` if the predicate does not hold
-fn process_predicate1<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
- pending_obligation: &mut PendingPredicateObligation<'tcx>,
- region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
- rfc1592_obligations: &mut Vec<PredicateObligation<'tcx>>)
- -> Result<Option<Vec<PredicateObligation<'tcx>>>,
- FulfillmentErrorCode<'tcx>>
+fn process_predicate1<'a, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ pending_obligation: &mut PendingPredicateObligation<'tcx>,
+ region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>,
+ rfc1592_obligations: &mut Vec<PredicateObligation<'tcx>>)
+ -> Result<Option<Vec<PredicateObligation<'tcx>>>,
+ FulfillmentErrorCode<'tcx>>
{
// if we were stalled on some unresolved variables, first check
// whether any of them have been resolved; if not, don't bother
}
ty::Predicate::ObjectSafe(trait_def_id) => {
- if !is_object_safe(selcx.tcx(), trait_def_id) {
+ if !selcx.tcx().is_object_safe(trait_def_id) {
Err(CodeSelectionError(Unimplemented))
} else {
Ok(Some(Vec::new()))
/// - it also appears in the backtrace at some position `X`; and,
/// - all the predicates at positions `X..` between `X` an the top are
/// also defaulted traits.
-fn coinductive_match<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
- cycle: &[PredicateObligation<'tcx>])
- -> bool
+fn coinductive_match<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ cycle: &[PredicateObligation<'tcx>])
+ -> bool
{
let len = cycle.len();
})
}
-fn coinductive_obligation<'a, 'tcx>(selcx: &SelectionContext<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>)
- -> bool {
+fn coinductive_obligation<'a, 'gcx, 'tcx>(selcx: &SelectionContext<'a, 'gcx, 'tcx>,
+ obligation: &PredicateObligation<'tcx>)
+ -> bool {
match obligation.predicate {
ty::Predicate::Trait(ref data) => {
selcx.tcx().trait_has_default_impl(data.def_id())
}
}
-impl<'tcx> GlobalFulfilledPredicates<'tcx> {
- pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'tcx> {
+impl<'a, 'gcx, 'tcx> GlobalFulfilledPredicates<'gcx> {
+ pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'gcx> {
GlobalFulfilledPredicates {
set: FnvHashSet(),
dep_graph: dep_graph,
}
}
- fn add_if_global(&mut self, key: &ty::Predicate<'tcx>) {
+ fn add_if_global(&mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>, key: &ty::Predicate<'tcx>) {
if let ty::Predicate::Trait(ref data) = *key {
// We only add things to the global predicate registry
// after the current task has proved them, and hence
// already has the required read edges, so we don't need
// to add any more edges here.
if data.is_global() {
- if self.set.insert(data.clone()) {
- debug!("add_if_global: global predicate `{:?}` added", data);
+ if let Some(data) = tcx.lift_to_global(data) {
+ if self.set.insert(data.clone()) {
+ debug!("add_if_global: global predicate `{:?}` added", data);
+ }
}
}
}
use hir::def_id::DefId;
use middle::free_region::FreeRegionMap;
use ty::subst;
-use ty::{self, Ty, TypeFoldable};
-use infer::{self, fixup_err_to_string, InferCtxt};
+use ty::{self, Ty, TyCtxt, TypeFoldable};
+use infer::InferCtxt;
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
pub use self::error_reporting::TraitErrorKey;
-pub use self::error_reporting::recursive_type_with_infinite_size_error;
-pub use self::error_reporting::report_fulfillment_errors;
-pub use self::error_reporting::report_fulfillment_errors_as_warnings;
-pub use self::error_reporting::report_overflow_error;
-pub use self::error_reporting::report_overflow_error_cycle;
-pub use self::error_reporting::report_selection_error;
-pub use self::error_reporting::report_object_safety_error;
pub use self::coherence::orphan_check;
pub use self::coherence::overlapping_impls;
pub use self::coherence::OrphanCheckErr;
pub use self::fulfill::{FulfillmentContext, GlobalFulfilledPredicates, RegionObligation};
pub use self::project::{MismatchedProjectionTypes, ProjectionMode};
pub use self::project::{normalize, Normalized};
-pub use self::object_safety::is_object_safe;
-pub use self::object_safety::astconv_object_safety_violations;
-pub use self::object_safety::object_safety_violations;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
-pub use self::object_safety::is_vtable_safe_method;
pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
pub use self::select::{MethodMatchedData}; // intentionally don't export variants
-pub use self::specialize::{Overlap, specialization_graph, specializes, translate_substs};
+pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs};
pub use self::util::elaborate_predicates;
-pub use self::util::get_vtable_index_of_object_method;
-pub use self::util::trait_ref_for_builtin_bound;
-pub use self::util::predicate_for_trait_def;
pub use self::util::supertraits;
pub use self::util::Supertraits;
pub use self::util::supertrait_def_ids;
pub use self::util::SupertraitDefIds;
pub use self::util::transitive_bounds;
-pub use self::util::upcast;
mod coherence;
mod error_reporting;
/// `bound` or is not known to meet bound (note that this is
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
-pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- ty: Ty<'tcx>,
- bound: ty::BuiltinBound,
- span: Span)
- -> bool
+pub fn type_known_to_meet_builtin_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'tcx>,
+ bound: ty::BuiltinBound,
+ span: Span)
+ -> bool
{
debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
ty,
let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
let obligation =
- util::predicate_for_builtin_bound(infcx.tcx, cause, bound, 0, ty);
+ infcx.tcx.predicate_for_builtin_bound(cause, bound, 0, ty);
let obligation = match obligation {
Ok(o) => o,
Err(..) => return false
// FIXME: this is gonna need to be removed ...
/// Normalizes the parameter environment, reporting errors if they occur.
-pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
- cause: ObligationCause<'tcx>)
- -> ty::ParameterEnvironment<'a,'tcx>
+pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ unnormalized_env: ty::ParameterEnvironment<'tcx>,
+ cause: ObligationCause<'tcx>)
+ -> ty::ParameterEnvironment<'tcx>
{
// I'm not wild about reporting errors here; I'd prefer to
// have the errors get reported at a defined place (e.g.,
// and errors will get reported then; so after typeck we
// can be sure that no errors should occur.
- let tcx = unnormalized_env.tcx;
let span = cause.span;
let body_id = cause.body_id;
let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(elaborated_env),
- ProjectionMode::AnyFinal);
- let predicates = match fully_normalize(&infcx,
- cause,
- &infcx.parameter_environment.caller_bounds) {
- Ok(predicates) => predicates,
- Err(errors) => {
- report_fulfillment_errors(&infcx, &errors);
- return infcx.parameter_environment; // an unnormalized env is better than nothing
- }
- };
-
- debug!("normalize_param_env_or_error: normalized predicates={:?}",
- predicates);
+ tcx.infer_ctxt(None, Some(elaborated_env), ProjectionMode::AnyFinal).enter(|infcx| {
+ let predicates = match fully_normalize(&infcx, cause,
+ &infcx.parameter_environment.caller_bounds) {
+ Ok(predicates) => predicates,
+ Err(errors) => {
+ infcx.report_fulfillment_errors(&errors);
+ // An unnormalized env is better than nothing.
+ return infcx.parameter_environment;
+ }
+ };
+
+ debug!("normalize_param_env_or_error: normalized predicates={:?}",
+ predicates);
+
+ let free_regions = FreeRegionMap::new();
+ infcx.resolve_regions_and_report_errors(&free_regions, body_id);
+ let predicates = match infcx.fully_resolve(&predicates) {
+ Ok(predicates) => predicates,
+ Err(fixup_err) => {
+ // If we encounter a fixup error, it means that some type
+ // variable wound up unconstrained. I actually don't know
+ // if this can happen, and I certainly don't expect it to
+ // happen often, but if it did happen it probably
+ // represents a legitimate failure due to some kind of
+ // unconstrained variable, and it seems better not to ICE,
+ // all things considered.
+ tcx.sess.span_err(span, &fixup_err.to_string());
+ // An unnormalized env is better than nothing.
+ return infcx.parameter_environment;
+ }
+ };
- let free_regions = FreeRegionMap::new();
- infcx.resolve_regions_and_report_errors(&free_regions, body_id);
- let predicates = match infcx.fully_resolve(&predicates) {
- Ok(predicates) => predicates,
- Err(fixup_err) => {
- // If we encounter a fixup error, it means that some type
- // variable wound up unconstrained. I actually don't know
- // if this can happen, and I certainly don't expect it to
- // happen often, but if it did happen it probably
- // represents a legitimate failure due to some kind of
- // unconstrained variable, and it seems better not to ICE,
- // all things considered.
- let err_msg = fixup_err_to_string(fixup_err);
- tcx.sess.span_err(span, &err_msg);
- return infcx.parameter_environment; // an unnormalized env is better than nothing
- }
- };
+ let predicates = match tcx.lift_to_global(&predicates) {
+ Some(predicates) => predicates,
+ None => return infcx.parameter_environment
+ };
- debug!("normalize_param_env_or_error: resolved predicates={:?}",
- predicates);
+ debug!("normalize_param_env_or_error: resolved predicates={:?}",
+ predicates);
- infcx.parameter_environment.with_caller_bounds(predicates)
+ infcx.parameter_environment.with_caller_bounds(predicates)
+ })
}
-pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- cause: ObligationCause<'tcx>,
- value: &T)
- -> Result<T, Vec<FulfillmentError<'tcx>>>
+pub fn fully_normalize<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ value: &T)
+ -> Result<T, Vec<FulfillmentError<'tcx>>>
where T : TypeFoldable<'tcx>
{
debug!("fully_normalize(value={:?})", value);
Generic,
}
-pub fn is_object_safe<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> bool
-{
- // Because we query yes/no results frequently, we keep a cache:
- let def = tcx.lookup_trait_def(trait_def_id);
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn is_object_safe(self, trait_def_id: DefId) -> bool {
+ // Because we query yes/no results frequently, we keep a cache:
+ let def = self.lookup_trait_def(trait_def_id);
- let result = def.object_safety().unwrap_or_else(|| {
- let result = object_safety_violations(tcx, trait_def_id).is_empty();
+ let result = def.object_safety().unwrap_or_else(|| {
+ let result = self.object_safety_violations(trait_def_id).is_empty();
- // Record just a yes/no result in the cache; this is what is
- // queried most frequently. Note that this may overwrite a
- // previous result, but always with the same thing.
- def.set_object_safety(result);
+ // Record just a yes/no result in the cache; this is what is
+ // queried most frequently. Note that this may overwrite a
+ // previous result, but always with the same thing.
+ def.set_object_safety(result);
- result
- });
-
- debug!("is_object_safe({:?}) = {}", trait_def_id, result);
+ result
+ });
- result
-}
+ debug!("is_object_safe({:?}) = {}", trait_def_id, result);
-/// Returns the object safety violations that affect
-/// astconv - currently, Self in supertraits. This is needed
-/// because `object_safety_violations` can't be used during
-/// type collection.
-pub fn astconv_object_safety_violations<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> Vec<ObjectSafetyViolation<'tcx>>
-{
- let mut violations = vec![];
-
- if supertraits_reference_self(tcx, trait_def_id) {
- violations.push(ObjectSafetyViolation::SupertraitSelf);
+ result
}
- debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
- trait_def_id,
- violations);
-
- violations
-}
+ /// Returns the object safety violations that affect
+ /// astconv - currently, Self in supertraits. This is needed
+ /// because `object_safety_violations` can't be used during
+ /// type collection.
+ pub fn astconv_object_safety_violations(self, trait_def_id: DefId)
+ -> Vec<ObjectSafetyViolation<'tcx>>
+ {
+ let mut violations = vec![];
-pub fn object_safety_violations<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> Vec<ObjectSafetyViolation<'tcx>>
-{
- traits::supertrait_def_ids(tcx, trait_def_id)
- .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
- .collect()
-}
+ if self.supertraits_reference_self(trait_def_id) {
+ violations.push(ObjectSafetyViolation::SupertraitSelf);
+ }
-fn object_safety_violations_for_trait<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> Vec<ObjectSafetyViolation<'tcx>>
-{
- // Check methods for violations.
- let mut violations: Vec<_> =
- tcx.trait_items(trait_def_id).iter()
- .filter_map(|item| {
- match *item {
- ty::MethodTraitItem(ref m) => {
- object_safety_violation_for_method(tcx, trait_def_id, &m)
- .map(|code| ObjectSafetyViolation::Method(m.clone(), code))
- }
- _ => None,
- }
- })
- .collect();
+ debug!("astconv_object_safety_violations(trait_def_id={:?}) = {:?}",
+ trait_def_id,
+ violations);
- // Check the trait itself.
- if trait_has_sized_self(tcx, trait_def_id) {
- violations.push(ObjectSafetyViolation::SizedSelf);
+ violations
}
- if supertraits_reference_self(tcx, trait_def_id) {
- violations.push(ObjectSafetyViolation::SupertraitSelf);
+
+ pub fn object_safety_violations(self, trait_def_id: DefId)
+ -> Vec<ObjectSafetyViolation<'tcx>>
+ {
+ traits::supertrait_def_ids(self, trait_def_id)
+ .flat_map(|def_id| self.object_safety_violations_for_trait(def_id))
+ .collect()
}
- debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
- trait_def_id,
- violations);
+ fn object_safety_violations_for_trait(self, trait_def_id: DefId)
+ -> Vec<ObjectSafetyViolation<'tcx>>
+ {
+ // Check methods for violations.
+ let mut violations: Vec<_> =
+ self.trait_items(trait_def_id).iter()
+ .filter_map(|item| {
+ match *item {
+ ty::MethodTraitItem(ref m) => {
+ self.object_safety_violation_for_method(trait_def_id, &m)
+ .map(|code| ObjectSafetyViolation::Method(m.clone(), code))
+ }
+ _ => None,
+ }
+ })
+ .collect();
- violations
-}
+ // Check the trait itself.
+ if self.trait_has_sized_self(trait_def_id) {
+ violations.push(ObjectSafetyViolation::SizedSelf);
+ }
+ if self.supertraits_reference_self(trait_def_id) {
+ violations.push(ObjectSafetyViolation::SupertraitSelf);
+ }
-pub fn supertraits_reference_self<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> bool
-{
- let trait_def = tcx.lookup_trait_def(trait_def_id);
- let trait_ref = trait_def.trait_ref.clone();
- let trait_ref = trait_ref.to_poly_trait_ref();
- let predicates = tcx.lookup_super_predicates(trait_def_id);
- predicates
- .predicates
- .into_iter()
- .map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
- .any(|predicate| {
- match predicate {
- ty::Predicate::Trait(ref data) => {
- // In the case of a trait predicate, we can skip the "self" type.
- data.0.trait_ref.substs.types.get_slice(TypeSpace)
- .iter()
- .cloned()
- .any(|t| t.has_self_ty())
- }
- ty::Predicate::Projection(..) |
- ty::Predicate::WellFormed(..) |
- ty::Predicate::ObjectSafe(..) |
- ty::Predicate::TypeOutlives(..) |
- ty::Predicate::RegionOutlives(..) |
- ty::Predicate::ClosureKind(..) |
- ty::Predicate::Rfc1592(..) |
- ty::Predicate::Equate(..) => {
- false
- }
- }
- })
-}
+ debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
+ trait_def_id,
+ violations);
-fn trait_has_sized_self<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> bool
-{
- let trait_def = tcx.lookup_trait_def(trait_def_id);
- let trait_predicates = tcx.lookup_predicates(trait_def_id);
- generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
-}
+ violations
+ }
-fn generics_require_sized_self<'tcx>(tcx: &TyCtxt<'tcx>,
- generics: &ty::Generics<'tcx>,
- predicates: &ty::GenericPredicates<'tcx>)
- -> bool
-{
- let sized_def_id = match tcx.lang_items.sized_trait() {
- Some(def_id) => def_id,
- None => { return false; /* No Sized trait, can't require it! */ }
- };
-
- // Search for a predicate like `Self : Sized` amongst the trait bounds.
- let free_substs = tcx.construct_free_substs(generics,
- tcx.region_maps.node_extent(ast::DUMMY_NODE_ID));
- let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
- elaborate_predicates(tcx, predicates)
- .any(|predicate| {
- match predicate {
- ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
- trait_pred.0.self_ty().is_self()
+ fn supertraits_reference_self(self, trait_def_id: DefId) -> bool {
+ let trait_def = self.lookup_trait_def(trait_def_id);
+ let trait_ref = trait_def.trait_ref.clone();
+ let trait_ref = trait_ref.to_poly_trait_ref();
+ let predicates = self.lookup_super_predicates(trait_def_id);
+ predicates
+ .predicates
+ .into_iter()
+ .map(|predicate| predicate.subst_supertrait(self, &trait_ref))
+ .any(|predicate| {
+ match predicate {
+ ty::Predicate::Trait(ref data) => {
+ // In the case of a trait predicate, we can skip the "self" type.
+ data.0.trait_ref.substs.types.get_slice(TypeSpace)
+ .iter()
+ .cloned()
+ .any(|t| t.has_self_ty())
+ }
+ ty::Predicate::Projection(..) |
+ ty::Predicate::WellFormed(..) |
+ ty::Predicate::ObjectSafe(..) |
+ ty::Predicate::TypeOutlives(..) |
+ ty::Predicate::RegionOutlives(..) |
+ ty::Predicate::ClosureKind(..) |
+ ty::Predicate::Rfc1592(..) |
+ ty::Predicate::Equate(..) => {
+ false
+ }
}
- ty::Predicate::Projection(..) |
- ty::Predicate::Trait(..) |
- ty::Predicate::Rfc1592(..) |
- ty::Predicate::Equate(..) |
- ty::Predicate::RegionOutlives(..) |
- ty::Predicate::WellFormed(..) |
- ty::Predicate::ObjectSafe(..) |
- ty::Predicate::ClosureKind(..) |
- ty::Predicate::TypeOutlives(..) => {
- false
- }
- }
- })
-}
-
-/// Returns `Some(_)` if this method makes the containing trait not object safe.
-fn object_safety_violation_for_method<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- method: &ty::Method<'tcx>)
- -> Option<MethodViolationCode>
-{
- // Any method that has a `Self : Sized` requisite is otherwise
- // exempt from the regulations.
- if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
- return None;
+ })
}
- virtual_call_violation_for_method(tcx, trait_def_id, method)
-}
+ fn trait_has_sized_self(self, trait_def_id: DefId) -> bool {
+ let trait_def = self.lookup_trait_def(trait_def_id);
+ let trait_predicates = self.lookup_predicates(trait_def_id);
+ self.generics_require_sized_self(&trait_def.generics, &trait_predicates)
+ }
-/// We say a method is *vtable safe* if it can be invoked on a trait
-/// object. Note that object-safe traits can have some
-/// non-vtable-safe methods, so long as they require `Self:Sized` or
-/// otherwise ensure that they cannot be used when `Self=Trait`.
-pub fn is_vtable_safe_method<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- method: &ty::Method<'tcx>)
+ fn generics_require_sized_self(self,
+ generics: &ty::Generics<'gcx>,
+ predicates: &ty::GenericPredicates<'gcx>)
-> bool
-{
- virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
-}
-
-/// Returns `Some(_)` if this method cannot be called on a trait
-/// object; this does not necessarily imply that the enclosing trait
-/// is not object safe, because the method might have a where clause
-/// `Self:Sized`.
-fn virtual_call_violation_for_method<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- method: &ty::Method<'tcx>)
- -> Option<MethodViolationCode>
-{
- // The method's first parameter must be something that derefs (or
- // autorefs) to `&self`. For now, we only accept `self`, `&self`
- // and `Box<Self>`.
- match method.explicit_self {
- ty::ExplicitSelfCategory::Static => {
- return Some(MethodViolationCode::StaticMethod);
- }
-
- ty::ExplicitSelfCategory::ByValue |
- ty::ExplicitSelfCategory::ByReference(..) |
- ty::ExplicitSelfCategory::ByBox => {
- }
+ {
+ let sized_def_id = match self.lang_items.sized_trait() {
+ Some(def_id) => def_id,
+ None => { return false; /* No Sized trait, can't require it! */ }
+ };
+
+ // Search for a predicate like `Self : Sized` amongst the trait bounds.
+ let free_substs = self.construct_free_substs(generics,
+ self.region_maps.node_extent(ast::DUMMY_NODE_ID));
+ let predicates = predicates.instantiate(self, &free_substs).predicates.into_vec();
+ elaborate_predicates(self, predicates)
+ .any(|predicate| {
+ match predicate {
+ ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
+ trait_pred.0.self_ty().is_self()
+ }
+ ty::Predicate::Projection(..) |
+ ty::Predicate::Trait(..) |
+ ty::Predicate::Rfc1592(..) |
+ ty::Predicate::Equate(..) |
+ ty::Predicate::RegionOutlives(..) |
+ ty::Predicate::WellFormed(..) |
+ ty::Predicate::ObjectSafe(..) |
+ ty::Predicate::ClosureKind(..) |
+ ty::Predicate::TypeOutlives(..) => {
+ false
+ }
+ }
+ })
}
- // The `Self` type is erased, so it should not appear in list of
- // arguments or return type apart from the receiver.
- let ref sig = method.fty.sig;
- for &input_ty in &sig.0.inputs[1..] {
- if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
- return Some(MethodViolationCode::ReferencesSelf);
- }
- }
- if let ty::FnConverging(result_type) = sig.0.output {
- if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
- return Some(MethodViolationCode::ReferencesSelf);
+ /// Returns `Some(_)` if this method makes the containing trait not object safe.
+ fn object_safety_violation_for_method(self,
+ trait_def_id: DefId,
+ method: &ty::Method<'gcx>)
+ -> Option<MethodViolationCode>
+ {
+ // Any method that has a `Self : Sized` requisite is otherwise
+ // exempt from the regulations.
+ if self.generics_require_sized_self(&method.generics, &method.predicates) {
+ return None;
}
+
+ self.virtual_call_violation_for_method(trait_def_id, method)
}
- // We can't monomorphize things like `fn foo<A>(...)`.
- if !method.generics.types.is_empty_in(subst::FnSpace) {
- return Some(MethodViolationCode::Generic);
+ /// We say a method is *vtable safe* if it can be invoked on a trait
+ /// object. Note that object-safe traits can have some
+ /// non-vtable-safe methods, so long as they require `Self:Sized` or
+ /// otherwise ensure that they cannot be used when `Self=Trait`.
+ pub fn is_vtable_safe_method(self,
+ trait_def_id: DefId,
+ method: &ty::Method<'tcx>)
+ -> bool
+ {
+ self.virtual_call_violation_for_method(trait_def_id, method).is_none()
}
- None
-}
+ /// Returns `Some(_)` if this method cannot be called on a trait
+ /// object; this does not necessarily imply that the enclosing trait
+ /// is not object safe, because the method might have a where clause
+ /// `Self:Sized`.
+ fn virtual_call_violation_for_method(self,
+ trait_def_id: DefId,
+ method: &ty::Method<'tcx>)
+ -> Option<MethodViolationCode>
+ {
+ // The method's first parameter must be something that derefs (or
+ // autorefs) to `&self`. For now, we only accept `self`, `&self`
+ // and `Box<Self>`.
+ match method.explicit_self {
+ ty::ExplicitSelfCategory::Static => {
+ return Some(MethodViolationCode::StaticMethod);
+ }
-fn contains_illegal_self_type_reference<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- ty: Ty<'tcx>)
- -> bool
-{
- // This is somewhat subtle. In general, we want to forbid
- // references to `Self` in the argument and return types,
- // since the value of `Self` is erased. However, there is one
- // exception: it is ok to reference `Self` in order to access
- // an associated type of the current trait, since we retain
- // the value of those associated types in the object type
- // itself.
- //
- // ```rust
- // trait SuperTrait {
- // type X;
- // }
- //
- // trait Trait : SuperTrait {
- // type Y;
- // fn foo(&self, x: Self) // bad
- // fn foo(&self) -> Self // bad
- // fn foo(&self) -> Option<Self> // bad
- // fn foo(&self) -> Self::Y // OK, desugars to next example
- // fn foo(&self) -> <Self as Trait>::Y // OK
- // fn foo(&self) -> Self::X // OK, desugars to next example
- // fn foo(&self) -> <Self as SuperTrait>::X // OK
- // }
- // ```
- //
- // However, it is not as simple as allowing `Self` in a projected
- // type, because there are illegal ways to use `Self` as well:
- //
- // ```rust
- // trait Trait : SuperTrait {
- // ...
- // fn foo(&self) -> <Self as SomeOtherTrait>::X;
- // }
- // ```
- //
- // Here we will not have the type of `X` recorded in the
- // object type, and we cannot resolve `Self as SomeOtherTrait`
- // without knowing what `Self` is.
-
- let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
- let mut error = false;
- ty.maybe_walk(|ty| {
- match ty.sty {
- ty::TyParam(ref param_ty) => {
- if param_ty.space == SelfSpace {
- error = true;
- }
+ ty::ExplicitSelfCategory::ByValue |
+ ty::ExplicitSelfCategory::ByReference(..) |
+ ty::ExplicitSelfCategory::ByBox => {
+ }
+ }
- false // no contained types to walk
+ // The `Self` type is erased, so it should not appear in list of
+ // arguments or return type apart from the receiver.
+ let ref sig = method.fty.sig;
+ for &input_ty in &sig.0.inputs[1..] {
+ if self.contains_illegal_self_type_reference(trait_def_id, input_ty) {
+ return Some(MethodViolationCode::ReferencesSelf);
+ }
+ }
+ if let ty::FnConverging(result_type) = sig.0.output {
+ if self.contains_illegal_self_type_reference(trait_def_id, result_type) {
+ return Some(MethodViolationCode::ReferencesSelf);
}
+ }
- ty::TyProjection(ref data) => {
- // This is a projected type `<Foo as SomeTrait>::X`.
+ // We can't monomorphize things like `fn foo<A>(...)`.
+ if !method.generics.types.is_empty_in(subst::FnSpace) {
+ return Some(MethodViolationCode::Generic);
+ }
- // Compute supertraits of current trait lazily.
- if supertraits.is_none() {
- let trait_def = tcx.lookup_trait_def(trait_def_id);
- let trait_ref = ty::Binder(trait_def.trait_ref.clone());
- supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
+ None
+ }
+
+ fn contains_illegal_self_type_reference(self,
+ trait_def_id: DefId,
+ ty: Ty<'tcx>)
+ -> bool
+ {
+ // This is somewhat subtle. In general, we want to forbid
+ // references to `Self` in the argument and return types,
+ // since the value of `Self` is erased. However, there is one
+ // exception: it is ok to reference `Self` in order to access
+ // an associated type of the current trait, since we retain
+ // the value of those associated types in the object type
+ // itself.
+ //
+ // ```rust
+ // trait SuperTrait {
+ // type X;
+ // }
+ //
+ // trait Trait : SuperTrait {
+ // type Y;
+ // fn foo(&self, x: Self) // bad
+ // fn foo(&self) -> Self // bad
+ // fn foo(&self) -> Option<Self> // bad
+ // fn foo(&self) -> Self::Y // OK, desugars to next example
+ // fn foo(&self) -> <Self as Trait>::Y // OK
+ // fn foo(&self) -> Self::X // OK, desugars to next example
+ // fn foo(&self) -> <Self as SuperTrait>::X // OK
+ // }
+ // ```
+ //
+ // However, it is not as simple as allowing `Self` in a projected
+ // type, because there are illegal ways to use `Self` as well:
+ //
+ // ```rust
+ // trait Trait : SuperTrait {
+ // ...
+ // fn foo(&self) -> <Self as SomeOtherTrait>::X;
+ // }
+ // ```
+ //
+ // Here we will not have the type of `X` recorded in the
+ // object type, and we cannot resolve `Self as SomeOtherTrait`
+ // without knowing what `Self` is.
+
+ let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
+ let mut error = false;
+ ty.maybe_walk(|ty| {
+ match ty.sty {
+ ty::TyParam(ref param_ty) => {
+ if param_ty.space == SelfSpace {
+ error = true;
+ }
+
+ false // no contained types to walk
}
- // Determine whether the trait reference `Foo as
- // SomeTrait` is in fact a supertrait of the
- // current trait. In that case, this type is
- // legal, because the type `X` will be specified
- // in the object type. Note that we can just use
- // direct equality here because all of these types
- // are part of the formal parameter listing, and
- // hence there should be no inference variables.
- let projection_trait_ref = ty::Binder(data.trait_ref.clone());
- let is_supertrait_of_current_trait =
- supertraits.as_ref().unwrap().contains(&projection_trait_ref);
-
- if is_supertrait_of_current_trait {
- false // do not walk contained types, do not report error, do collect $200
- } else {
- true // DO walk contained types, POSSIBLY reporting an error
+ ty::TyProjection(ref data) => {
+ // This is a projected type `<Foo as SomeTrait>::X`.
+
+ // Compute supertraits of current trait lazily.
+ if supertraits.is_none() {
+ let trait_def = self.lookup_trait_def(trait_def_id);
+ let trait_ref = ty::Binder(trait_def.trait_ref.clone());
+ supertraits = Some(traits::supertraits(self, trait_ref).collect());
+ }
+
+ // Determine whether the trait reference `Foo as
+ // SomeTrait` is in fact a supertrait of the
+ // current trait. In that case, this type is
+ // legal, because the type `X` will be specified
+ // in the object type. Note that we can just use
+ // direct equality here because all of these types
+ // are part of the formal parameter listing, and
+ // hence there should be no inference variables.
+ let projection_trait_ref = ty::Binder(data.trait_ref.clone());
+ let is_supertrait_of_current_trait =
+ supertraits.as_ref().unwrap().contains(&projection_trait_ref);
+
+ if is_supertrait_of_current_trait {
+ false // do not walk contained types, do not report error, do collect $200
+ } else {
+ true // DO walk contained types, POSSIBLY reporting an error
+ }
}
- }
- _ => true, // walk contained types, if any
- }
- });
+ _ => true, // walk contained types, if any
+ }
+ });
- error
+ error
+ }
}
//! Code for projecting associated types out of trait references.
use super::elaborate_predicates;
-use super::report_overflow_error;
use super::specialization_graph;
use super::translate_substs;
use super::Obligation;
/// for<...> <T as Trait>::U == V
///
/// If successful, this may result in additional obligations.
-pub fn poly_project_and_unify_type<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &PolyProjectionObligation<'tcx>)
-> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
{
let skol_obligation = obligation.with(skol_predicate);
match project_and_unify_type(selcx, &skol_obligation) {
Ok(result) => {
- match infcx.leak_check(&skol_map, snapshot) {
+ match infcx.leak_check(false, &skol_map, snapshot) {
Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)),
Err(e) => Err(MismatchedProjectionTypes { err: e }),
}
/// <T as Trait>::U == V
///
/// If successful, this may result in additional obligations.
-fn project_and_unify_type<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn project_and_unify_type<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionObligation<'tcx>)
-> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>>
{
let infcx = selcx.infcx();
let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
- match infer::mk_eqty(infcx, true, origin, normalized_ty, obligation.predicate.ty) {
+ match infcx.eq_types(true, origin, normalized_ty, obligation.predicate.ty) {
Ok(InferOk { obligations: inferred_obligations, .. }) => {
// FIXME(#32730) propagate obligations
assert!(inferred_obligations.is_empty());
}
}
-fn consider_unification_despite_ambiguity<'cx,'tcx>(selcx: &mut SelectionContext<'cx,'tcx>,
- obligation: &ProjectionObligation<'tcx>) {
+fn consider_unification_despite_ambiguity<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
+ obligation: &ProjectionObligation<'tcx>)
+{
debug!("consider_unification_despite_ambiguity(obligation={:?})",
obligation);
debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}",
self_ty.sty);
match self_ty.sty {
- ty::TyClosure(closure_def_id, ref substs) => {
+ ty::TyClosure(closure_def_id, substs) => {
let closure_typer = selcx.closure_typer();
let closure_type = closure_typer.closure_type(closure_def_id, substs);
let ty::Binder((_, ret_type)) =
- util::closure_trait_ref_and_return_type(infcx.tcx,
- def_id,
- self_ty,
- &closure_type.sig,
- util::TupleArgumentsFlag::No);
+ infcx.tcx.closure_trait_ref_and_return_type(def_id,
+ self_ty,
+ &closure_type.sig,
+ util::TupleArgumentsFlag::No);
// We don't have to normalize the return type here - this is only
// reached for TyClosure: Fn inputs where the closure kind is
// still unknown, which should only occur in typeck where the
ret_type);
let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
let obligation_ty = obligation.predicate.ty;
- match infer::mk_eqty(infcx, true, origin, obligation_ty, ret_type) {
+ match infcx.eq_types(true, origin, obligation_ty, ret_type) {
Ok(InferOk { obligations, .. }) => {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
/// them with a fully resolved type where possible. The return value
/// combines the normalized result and any additional obligations that
/// were incurred as result.
-pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
- cause: ObligationCause<'tcx>,
- value: &T)
- -> Normalized<'tcx, T>
+pub fn normalize<'a, 'b, 'gcx, 'tcx, T>(selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ value: &T)
+ -> Normalized<'tcx, T>
where T : TypeFoldable<'tcx>
{
normalize_with_depth(selcx, cause, 0, value)
}
/// As `normalize`, but with a custom depth.
-pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
- cause: ObligationCause<'tcx>,
- depth: usize,
- value: &T)
- -> Normalized<'tcx, T>
+pub fn normalize_with_depth<'a, 'b, 'gcx, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ value: &T)
+ -> Normalized<'tcx, T>
+
where T : TypeFoldable<'tcx>
{
let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
}
}
-struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> {
- selcx: &'a mut SelectionContext<'b,'tcx>,
+struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
+ selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
cause: ObligationCause<'tcx>,
obligations: Vec<PredicateObligation<'tcx>>,
depth: usize,
}
-impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> {
- fn new(selcx: &'a mut SelectionContext<'b,'tcx>,
+impl<'a, 'b, 'gcx, 'tcx> AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> {
+ fn new(selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
cause: ObligationCause<'tcx>,
depth: usize)
- -> AssociatedTypeNormalizer<'a,'b,'tcx>
+ -> AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx>
{
AssociatedTypeNormalizer {
selcx: selcx,
}
}
-impl<'a,'b,'tcx> TypeFolder<'tcx> for AssociatedTypeNormalizer<'a,'b,'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> {
+ fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'tcx> {
self.selcx.tcx()
}
/// there are unresolved type variables in the projection, we will
/// substitute a fresh type variable `$X` and generate a new
/// obligation `<T as Trait>::Item == $X` for later.
-pub fn normalize_projection_type<'a,'b,'tcx>(
- selcx: &'a mut SelectionContext<'b,'tcx>,
+pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
+ selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: usize)
/// as Trait>::Item`. The result is always a type (and possibly
/// additional obligations). Returns `None` in the case of ambiguity,
/// which indicates that there are unbound type variables.
-fn opt_normalize_projection_type<'a,'b,'tcx>(
- selcx: &'a mut SelectionContext<'b,'tcx>,
+fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
+ selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: usize)
/// an error for this obligation, but we legitimately should not,
/// because it contains `[type error]`. Yuck! (See issue #29857 for
/// one case where this arose.)
-fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
- projection_ty: ty::ProjectionTy<'tcx>,
- cause: ObligationCause<'tcx>,
- depth: usize)
- -> NormalizedTy<'tcx>
+fn normalize_to_error<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize)
+ -> NormalizedTy<'tcx>
{
let trait_ref = projection_ty.trait_ref.to_poly_trait_ref();
let trait_obligation = Obligation { cause: cause,
}
/// Compute the result of a projection type (if we can).
-fn project_type<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn project_type<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>)
-> Result<ProjectedTy<'tcx>, ProjectionTyError<'tcx>>
{
let recursion_limit = selcx.tcx().sess.recursion_limit.get();
if obligation.recursion_depth >= recursion_limit {
debug!("project: overflow!");
- report_overflow_error(selcx.infcx(), &obligation, true);
+ selcx.infcx().report_overflow_error(&obligation, true);
}
let obligation_trait_ref =
/// The first thing we have to do is scan through the parameter
/// environment to see whether there are any projection predicates
/// there that can answer this question.
-fn assemble_candidates_from_param_env<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn assemble_candidates_from_param_env<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
/// ```
///
/// Here, for example, we could conclude that the result is `i32`.
-fn assemble_candidates_from_trait_def<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn assemble_candidates_from_trait_def<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
bounds)
}
-fn assemble_candidates_from_predicates<'cx,'tcx,I>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn assemble_candidates_from_predicates<'cx, 'gcx, 'tcx, I>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>,
}
}
-fn assemble_candidates_from_object_type<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn assemble_candidates_from_object_type<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
env_predicates)
}
-fn assemble_candidates_from_impls<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate_set: &mut ProjectionTyCandidateSet<'tcx>)
Ok(())
}
-fn confirm_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
candidate: ProjectionTyCandidate<'tcx>)
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
}
}
-fn confirm_fn_pointer_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_fn_pointer_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
fn_type: Ty<'tcx>)
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes)
}
-fn confirm_closure_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_closure_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>)
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
{
let closure_typer = selcx.closure_typer();
- let closure_type = closure_typer.closure_type(vtable.closure_def_id, &vtable.substs);
+ let closure_type = closure_typer.closure_type(vtable.closure_def_id, vtable.substs);
let Normalized {
value: closure_type,
mut obligations
(ty, obligations)
}
-fn confirm_callable_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_callable_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
fn_sig: &ty::PolyFnSig<'tcx>,
flag: util::TupleArgumentsFlag)
// Note: we unwrap the binder here but re-create it below (1)
let ty::Binder((trait_ref, ret_type)) =
- util::closure_trait_ref_and_return_type(tcx,
- fn_once_def_id,
- obligation.predicate.trait_ref.self_ty(),
- fn_sig,
- flag);
+ tcx.closure_trait_ref_and_return_type(fn_once_def_id,
+ obligation.predicate.trait_ref.self_ty(),
+ fn_sig,
+ flag);
let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here
projection_ty: ty::ProjectionTy {
confirm_param_env_candidate(selcx, obligation, predicate)
}
-fn confirm_param_env_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
poly_projection: ty::PolyProjectionPredicate<'tcx>)
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
(projection.ty, vec!())
}
-fn confirm_impl_candidate<'cx,'tcx>(
- selcx: &mut SelectionContext<'cx,'tcx>,
+fn confirm_impl_candidate<'cx, 'gcx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>)
-> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
///
/// Based on the "projection mode", this lookup may in fact only examine the
/// topmost impl. See the comments for `ProjectionMode` for more details.
-fn assoc_ty_def<'cx, 'tcx>(selcx: &SelectionContext<'cx, 'tcx>,
- impl_def_id: DefId,
- assoc_ty_name: ast::Name)
- -> Option<specialization_graph::NodeItem<Rc<ty::AssociatedType<'tcx>>>>
+fn assoc_ty_def<'cx, 'gcx, 'tcx>(
+ selcx: &SelectionContext<'cx, 'gcx, 'tcx>,
+ impl_def_id: DefId,
+ assoc_ty_name: ast::Name)
+ -> Option<specialization_graph::NodeItem<Rc<ty::AssociatedType<'tcx>>>>
{
let trait_def_id = selcx.tcx().impl_trait_ref(impl_def_id).unwrap().def_id;
use super::project;
use super::project::{normalize_with_depth, Normalized};
use super::{PredicateObligation, TraitObligation, ObligationCause};
-use super::report_overflow_error;
use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
use super::{ObjectCastObligation, Obligation};
VtableFnPointer, VtableObject, VtableDefaultImpl};
use super::{VtableImplData, VtableObjectData, VtableBuiltinData,
VtableClosureData, VtableDefaultImplData};
-use super::object_safety;
use super::util;
use hir::def_id::DefId;
use hir;
use util::nodemap::FnvHashMap;
-pub struct SelectionContext<'cx, 'tcx:'cx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
+ infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
/// Freshener used specifically for skolemizing entries on the
/// obligation stack. This ensures that all entries on the stack
/// at one time will have the same set of skolemized entries,
/// which is important for checking for trait bounds that
/// recursively require themselves.
- freshener: TypeFreshener<'cx, 'tcx>,
+ freshener: TypeFreshener<'cx, 'gcx, 'tcx>,
/// If true, indicates that the evaluation should be conservative
/// and consider the possibility of types outside this crate.
/// Implementation of a `Fn`-family trait by one of the anonymous types
/// generated for a `||` expression. The ty::ClosureKind informs the
/// confirmation step what ClosureKind obligation to emit.
- ClosureCandidate(/* closure */ DefId, &'tcx ty::ClosureSubsts<'tcx>, ty::ClosureKind),
+ ClosureCandidate(/* closure */ DefId, ty::ClosureSubsts<'tcx>, ty::ClosureKind),
/// Implementation of a `Fn`-family trait by one of the anonymous
/// types generated for a fn pointer type (e.g., `fn(int)->int`)
BuiltinUnsizeCandidate,
}
+impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
+ type Lifted = SelectionCandidate<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ Some(match *self {
+ BuiltinCandidate { has_nested } => {
+ BuiltinCandidate {
+ has_nested: has_nested
+ }
+ }
+ ImplCandidate(def_id) => ImplCandidate(def_id),
+ DefaultImplCandidate(def_id) => DefaultImplCandidate(def_id),
+ DefaultImplObjectCandidate(def_id) => {
+ DefaultImplObjectCandidate(def_id)
+ }
+ ProjectionCandidate => ProjectionCandidate,
+ FnPointerCandidate => FnPointerCandidate,
+ ObjectCandidate => ObjectCandidate,
+ BuiltinObjectCandidate => BuiltinObjectCandidate,
+ BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
+
+ ParamCandidate(ref trait_ref) => {
+ return tcx.lift(trait_ref).map(ParamCandidate);
+ }
+ ClosureCandidate(def_id, ref substs, kind) => {
+ return tcx.lift(substs).map(|substs| {
+ ClosureCandidate(def_id, substs, kind)
+ });
+ }
+ })
+ }
+}
+
struct SelectionCandidateSet<'tcx> {
// a list of candidates that definitely apply to the current
// obligation (meaning: types unify).
hashmap: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, EvaluationResult>>
}
-impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
- pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
SelectionContext {
infcx: infcx,
freshener: infcx.freshener(),
}
}
- pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+ pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
SelectionContext {
infcx: infcx,
freshener: infcx.freshener(),
}
}
- pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+ pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
self.infcx
}
- pub fn tcx(&self) -> &'cx TyCtxt<'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
self.infcx.tcx
}
- pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> {
+ pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'tcx> {
self.infcx.param_env()
}
- pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+ pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
self.infcx
}
}
ty::Predicate::ObjectSafe(trait_def_id) => {
- if object_safety::is_object_safe(self.tcx(), trait_def_id) {
+ if self.tcx().is_object_safe(trait_def_id) {
EvaluatedToOk
} else {
EvaluatedToErr
result
}
- fn pick_evaluation_cache(&self) -> &EvaluationCache<'tcx> {
- // see comment in `pick_candidate_cache`
- if self.intercrate ||
- !self.param_env().caller_bounds.is_empty()
- {
- &self.param_env().evaluation_cache
- } else
- {
- &self.tcx().evaluation_cache
- }
- }
-
fn check_evaluation_cache(&self, trait_ref: ty::PolyTraitRef<'tcx>)
-> Option<EvaluationResult>
{
- let cache = self.pick_evaluation_cache();
- cache.hashmap.borrow().get(&trait_ref).cloned()
+ if self.can_use_global_caches() {
+ let cache = self.tcx().evaluation_cache.hashmap.borrow();
+ if let Some(cached) = cache.get(&trait_ref) {
+ return Some(cached.clone());
+ }
+ }
+ self.infcx.evaluation_cache.hashmap.borrow().get(&trait_ref).cloned()
}
fn insert_evaluation_cache(&mut self,
return;
}
- let cache = self.pick_evaluation_cache();
- cache.hashmap.borrow_mut().insert(trait_ref, result);
+ if self.can_use_global_caches() {
+ let mut cache = self.tcx().evaluation_cache.hashmap.borrow_mut();
+ if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) {
+ cache.insert(trait_ref, result);
+ return;
+ }
+ }
+
+ self.infcx.evaluation_cache.hashmap.borrow_mut().insert(trait_ref, result);
}
///////////////////////////////////////////////////////////////////////////
// not update) the cache.
let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
if stack.obligation.recursion_depth >= recursion_limit {
- report_overflow_error(self.infcx(), &stack.obligation, true);
+ self.infcx().report_overflow_error(&stack.obligation, true);
}
// Check the cache. Note that we skolemize the trait-ref
coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
}
- fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> {
+ /// Returns true if the global caches can be used.
+ /// Do note that if the type itself is not in the
+ /// global tcx, the local caches will be used.
+ fn can_use_global_caches(&self) -> bool {
// If there are any where-clauses in scope, then we always use
// a cache local to this particular scope. Otherwise, we
// switch to a global cache. We used to try and draw
// rule seems to be pretty clearly safe and also still retains
// a very high hit rate (~95% when compiling rustc).
if !self.param_env().caller_bounds.is_empty() {
- return &self.param_env().selection_cache;
+ return false;
}
// Avoid using the master cache during coherence and just rely
// it's not worth going to more trouble to increase the
// hit-rate I don't think.
if self.intercrate {
- return &self.param_env().selection_cache;
+ return false;
}
// Otherwise, we can use the global cache.
- &self.tcx().selection_cache
+ true
}
fn check_candidate_cache(&mut self,
cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
-> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>>
{
- let cache = self.pick_candidate_cache();
- let hashmap = cache.hashmap.borrow();
- hashmap.get(&cache_fresh_trait_pred.0.trait_ref).cloned()
+ let trait_ref = &cache_fresh_trait_pred.0.trait_ref;
+ if self.can_use_global_caches() {
+ let cache = self.tcx().selection_cache.hashmap.borrow();
+ if let Some(cached) = cache.get(&trait_ref) {
+ return Some(cached.clone());
+ }
+ }
+ self.infcx.selection_cache.hashmap.borrow().get(trait_ref).cloned()
}
fn insert_candidate_cache(&mut self,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>)
{
- let cache = self.pick_candidate_cache();
- let mut hashmap = cache.hashmap.borrow_mut();
- hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate);
+ let trait_ref = cache_fresh_trait_pred.0.trait_ref;
+ if self.can_use_global_caches() {
+ let mut cache = self.tcx().selection_cache.hashmap.borrow_mut();
+ if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) {
+ if let Some(candidate) = self.tcx().lift_to_global(&candidate) {
+ cache.insert(trait_ref, candidate);
+ return;
+ }
+ }
+ }
+
+ self.infcx.selection_cache.hashmap.borrow_mut().insert(trait_ref, candidate);
}
fn should_update_candidate_cache(&mut self,
Err(_) => { return false; }
}
- self.infcx.leak_check(skol_map, snapshot).is_ok()
+ self.infcx.leak_check(false, skol_map, snapshot).is_ok()
}
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
// type/region parameters
let self_ty = *obligation.self_ty().skip_binder();
let (closure_def_id, substs) = match self_ty.sty {
- ty::TyClosure(id, ref substs) => (id, substs),
+ ty::TyClosure(id, substs) => (id, substs),
ty::TyInfer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
// these cases wind up being considered ambiguous due to a
// (spurious) ambiguity introduced here.
let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
- if !object_safety::is_object_safe(self.tcx(), predicate_trait_ref.def_id()) {
+ if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) {
return;
}
// i.e. EvaluatedToOk:
if other.evaluation == EvaluatedToOk {
if let ImplCandidate(victim_def) = victim.candidate {
- return traits::specializes(self.tcx(), other_def, victim_def);
+ let tcx = self.tcx().global_tcx();
+ return traits::specializes(tcx, other_def, victim_def);
}
}
ty::TyStr | ty::TySlice(_) | ty::TyTrait(..) => Never,
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(tys) => {
// FIXME(#33242) we only need to constrain the last field
- Where(ty::Binder(tys.clone()))
+ Where(ty::Binder(tys.to_vec()))
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder(match sized_crit.sty {
- ty::TyTuple(ref tys) => tys.to_owned().subst(self.tcx(), substs),
+ ty::TyTuple(tys) => tys.to_vec().subst(self.tcx(), substs),
ty::TyBool => vec![],
_ => vec![sized_crit.subst(self.tcx(), substs)]
}))
Where(ty::Binder(vec![element_ty]))
}
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(tys) => {
// (*) binder moved here
- Where(ty::Binder(tys.clone()))
+ Where(ty::Binder(tys.to_vec()))
}
ty::TyStruct(..) | ty::TyEnum(..) | ty::TyProjection(..) | ty::TyParam(..) => {
ty::TyTuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
- tys.clone()
+ tys.to_vec()
}
ty::TyClosure(_, ref substs) => {
// OIBIT interact? That is, there is no way to say
// "make me invariant with respect to this TYPE, but
// do not act as though I can reach it"
- substs.upvar_tys.clone()
+ substs.upvar_tys.to_vec()
}
// for `PhantomData<T>`, we pass `T`
recursion_depth,
&skol_ty);
let skol_obligation =
- util::predicate_for_trait_def(self.tcx(),
+ self.tcx().predicate_for_trait_def(
cause.clone(),
trait_def_id,
recursion_depth,
fn vtable_impl(&mut self,
impl_def_id: DefId,
- mut substs: Normalized<'tcx, Substs<'tcx>>,
+ mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
skol_map: infer::SkolemizationMap,
impl_obligations.append(&mut substs.obligations);
VtableImplData { impl_def_id: impl_def_id,
- substs: self.tcx().mk_substs(substs.value),
+ substs: substs.value,
nested: impl_obligations }
}
// entries, so that we can compute the offset for the selected
// trait.
vtable_base =
- nonmatching.map(|t| util::count_own_vtable_entries(self.tcx(), t))
+ nonmatching.map(|t| self.tcx().count_own_vtable_entries(t))
.sum();
}
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let sig = self_ty.fn_sig();
let trait_ref =
- util::closure_trait_ref_and_return_type(self.tcx(),
- obligation.predicate.def_id(),
- self_ty,
- sig,
- util::TupleArgumentsFlag::Yes)
+ self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(),
+ self_ty,
+ sig,
+ util::TupleArgumentsFlag::Yes)
.map_bound(|(trait_ref, _)| trait_ref);
self.confirm_poly_trait_refs(obligation.cause.clone(),
fn confirm_closure_candidate(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
- substs: &ty::ClosureSubsts<'tcx>,
+ substs: ty::ClosureSubsts<'tcx>,
kind: ty::ClosureKind)
-> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
SelectionError<'tcx>>
// })
// .chain(Some(data.principal_def_id()));
if let Some(did) = object_dids.find(|did| {
- !object_safety::is_object_safe(tcx, *did)
+ !tcx.is_object_safe(*did)
}) {
return Err(TraitNotObjectSafe(did))
}
// object type is Foo+Send, this would create an obligation
// for the Send check.)
for bound in &builtin_bounds {
- if let Ok(tr) = util::trait_ref_for_builtin_bound(tcx, bound, source) {
+ if let Ok(tr) = tcx.trait_ref_for_builtin_bound(bound, source) {
push(tr.to_predicate());
} else {
return Err(Unimplemented);
assert!(obligations.is_empty());
// Construct the nested Field<T>: Unsize<Field<U>> predicate.
- nested.push(util::predicate_for_trait_def(tcx,
+ nested.push(tcx.predicate_for_trait_def(
obligation.cause.clone(),
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot)
- -> (Normalized<'tcx, Substs<'tcx>>, infer::SkolemizationMap)
+ -> (Normalized<'tcx, &'tcx Substs<'tcx>>, infer::SkolemizationMap)
{
match self.match_impl(impl_def_id, obligation, snapshot) {
Ok((substs, skol_map)) => (substs, skol_map),
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot)
- -> Result<(Normalized<'tcx, Substs<'tcx>>,
+ -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>,
infer::SkolemizationMap), ()>
{
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
- if let Err(e) = self.infcx.leak_check(&skol_map, snapshot) {
+ if let Err(e) = self.infcx.leak_check(false, &skol_map, snapshot) {
debug!("match_impl: failed leak check due to `{}`", e);
return Err(());
}
fn closure_trait_ref_unnormalized(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
- substs: &ty::ClosureSubsts<'tcx>)
+ substs: ty::ClosureSubsts<'tcx>)
-> ty::PolyTraitRef<'tcx>
{
let closure_type = self.infcx.closure_type(closure_def_id, substs);
let ty::Binder((trait_ref, _)) =
- util::closure_trait_ref_and_return_type(self.tcx(),
- obligation.predicate.def_id(),
- obligation.predicate.0.self_ty(), // (1)
- &closure_type.sig,
- util::TupleArgumentsFlag::No);
+ self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(),
+ obligation.predicate.0.self_ty(), // (1)
+ &closure_type.sig,
+ util::TupleArgumentsFlag::No);
// (1) Feels icky to skip the binder here, but OTOH we know
// that the self-type is an unboxed closure type and hence is
// in fact unparameterized (or at least does not reference any
fn closure_trait_ref(&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
- substs: &ty::ClosureSubsts<'tcx>)
+ substs: ty::ClosureSubsts<'tcx>)
-> Normalized<'tcx, ty::PolyTraitRef<'tcx>>
{
let trait_ref = self.closure_trait_ref_unnormalized(
use super::util::{fresh_type_vars_for_impl, impl_trait_ref_and_oblig};
use hir::def_id::DefId;
-use infer::{self, InferCtxt, TypeOrigin};
+use infer::{InferCtxt, TypeOrigin};
use middle::region;
use ty::subst::{Subst, Substs};
use traits::{self, ProjectionMode, ObligationCause, Normalized};
pub mod specialization_graph;
/// Information pertinent to an overlapping impl error.
-pub struct Overlap<'a, 'tcx: 'a> {
- pub in_context: InferCtxt<'a, 'tcx>,
+pub struct OverlapError {
pub with_impl: DefId,
- pub on_trait_ref: ty::TraitRef<'tcx>,
+ pub trait_desc: String,
+ pub self_desc: Option<String>
}
/// Given a subst for the requested impl, translate it to a subst
/// through associated type projection. We deal with such cases by using
/// *fulfillment* to relate the two impls, requiring that all projections are
/// resolved.
-pub fn translate_substs<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- source_impl: DefId,
- source_substs: &'tcx Substs<'tcx>,
- target_node: specialization_graph::Node)
- -> &'tcx Substs<'tcx> {
+pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ source_impl: DefId,
+ source_substs: &'tcx Substs<'tcx>,
+ target_node: specialization_graph::Node)
+ -> &'tcx Substs<'tcx> {
let source_trait_ref = infcx.tcx
.impl_trait_ref(source_impl)
.unwrap()
specialization failed to hold")
})
}
- specialization_graph::Node::Trait(..) => source_trait_ref.substs.clone(),
+ specialization_graph::Node::Trait(..) => source_trait_ref.substs,
};
// directly inherit the method generics, since those do not vary across impls
/// Specialization is determined by the sets of types to which the impls apply;
/// impl1 specializes impl2 if it applies to a subset of the types impl2 applies
/// to.
-pub fn specializes(tcx: &TyCtxt, impl1_def_id: DefId, impl2_def_id: DefId) -> bool {
+pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId) -> bool {
// The feature gate should prevent introducing new specializations, but not
// taking advantage of upstream ones.
if !tcx.sess.features.borrow().specialization &&
return false;
}
- let mut infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Topmost);
-
// create a parameter environment corresponding to a (skolemized) instantiation of impl1
let scheme = tcx.lookup_item_type(impl1_def_id);
let predicates = tcx.lookup_predicates(impl1_def_id);
.unwrap()
.subst(tcx, &penv.free_substs);
- // Normalize the trait reference, adding any obligations that arise into the impl1 assumptions
- let Normalized { value: impl1_trait_ref, obligations: normalization_obligations } = {
- let selcx = &mut SelectionContext::new(&infcx);
- traits::normalize(selcx, ObligationCause::dummy(), &impl1_trait_ref)
- };
- penv.caller_bounds.extend(normalization_obligations.into_iter().map(|o| o.predicate));
+ tcx.normalizing_infer_ctxt(ProjectionMode::Topmost).enter(|mut infcx| {
+ // Normalize the trait reference, adding any obligations
+ // that arise into the impl1 assumptions.
+ let Normalized { value: impl1_trait_ref, obligations: normalization_obligations } = {
+ let selcx = &mut SelectionContext::new(&infcx);
+ traits::normalize(selcx, ObligationCause::dummy(), &impl1_trait_ref)
+ };
+ penv.caller_bounds.extend(normalization_obligations.into_iter().map(|o| {
+ match tcx.lift_to_global(&o.predicate) {
+ Some(predicate) => predicate,
+ None => {
+ bug!("specializes: obligation `{:?}` has inference types/regions", o);
+ }
+ }
+ }));
- // Install the parameter environment, taking the predicates of impl1 as assumptions:
- infcx.parameter_environment = penv;
+ // Install the parameter environment, taking the predicates of impl1 as assumptions:
+ infcx.parameter_environment = penv;
- // Attempt to prove that impl2 applies, given all of the above.
- fulfill_implication(&infcx, impl1_trait_ref, impl2_def_id).is_ok()
+ // Attempt to prove that impl2 applies, given all of the above.
+ fulfill_implication(&infcx, impl1_trait_ref, impl2_def_id).is_ok()
+ })
}
/// Attempt to fulfill all obligations of `target_impl` after unification with
/// generics of `target_impl`, including both those needed to unify with
/// `source_trait_ref` and those whose identity is determined via a where
/// clause in the impl.
-fn fulfill_implication<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- source_trait_ref: ty::TraitRef<'tcx>,
- target_impl: DefId)
- -> Result<Substs<'tcx>, ()> {
+fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ source_trait_ref: ty::TraitRef<'tcx>,
+ target_impl: DefId)
+ -> Result<&'tcx Substs<'tcx>, ()> {
infcx.commit_if_ok(|_| {
let selcx = &mut SelectionContext::new(&infcx);
let target_substs = fresh_type_vars_for_impl(&infcx, DUMMY_SP, target_impl);
&target_substs);
// do the impls unify? If not, no specialization.
- if let Err(_) = infer::mk_eq_trait_refs(&infcx,
- true,
- TypeOrigin::Misc(DUMMY_SP),
- source_trait_ref,
- target_trait_ref) {
+ if let Err(_) = infcx.eq_trait_refs(true,
+ TypeOrigin::Misc(DUMMY_SP),
+ source_trait_ref,
+ target_trait_ref) {
debug!("fulfill_implication: {:?} does not unify with {:?}",
source_trait_ref,
target_trait_ref);
fulfill_cx.register_predicate_obligation(&infcx, oblig);
}
- if let Err(errors) = infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()) {
+ if let Err(errors) = infcx.drain_fulfillment_cx(&mut fulfill_cx, &()) {
// no dice!
debug!("fulfill_implication: for impls on {:?} and {:?}, could not fulfill: {:?} given \
{:?}",
use std::cell;
use std::rc::Rc;
-use super::{Overlap, specializes};
+use super::{OverlapError, specializes};
use hir::def_id::DefId;
-use infer;
use traits::{self, ProjectionMode};
use ty::{self, TyCtxt, ImplOrTraitItem, TraitDef, TypeFoldable};
use ty::fast_reject::{self, SimplifiedType};
}
/// The result of attempting to insert an impl into a group of children.
-enum InsertResult<'a, 'tcx: 'a> {
+enum Inserted {
/// The impl was inserted as a new child in this group of children.
BecameNewSibling,
/// The impl is a specialization of an existing child.
ShouldRecurseOn(DefId),
-
- /// The impl has an unresolvable overlap with an existing child (neither
- /// specializes the other).
- Overlapped(Overlap<'a, 'tcx>),
}
-impl Children {
+impl<'a, 'gcx, 'tcx> Children {
fn new() -> Children {
Children {
nonblanket_impls: FnvHashMap(),
}
/// Insert an impl into this set of children without comparing to any existing impls
- fn insert_blindly(&mut self, tcx: &TyCtxt, impl_def_id: DefId) {
+ fn insert_blindly(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId) {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
self.nonblanket_impls.entry(sty).or_insert(vec![]).push(impl_def_id)
/// Attempt to insert an impl into this set of children, while comparing for
/// specialiation relationships.
- fn insert<'a, 'tcx>(&mut self,
- tcx: &'a TyCtxt<'tcx>,
- impl_def_id: DefId,
- simplified_self: Option<SimplifiedType>)
- -> InsertResult<'a, 'tcx>
+ fn insert(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId,
+ simplified_self: Option<SimplifiedType>)
+ -> Result<Inserted, OverlapError>
{
for slot in match simplified_self {
Some(sty) => self.filtered_mut(sty),
} {
let possible_sibling = *slot;
- let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::Topmost);
- let overlap = traits::overlapping_impls(&infcx, possible_sibling, impl_def_id);
-
- if let Some(impl_header) = overlap {
- let le = specializes(tcx, impl_def_id, possible_sibling);
- let ge = specializes(tcx, possible_sibling, impl_def_id);
+ let tcx = tcx.global_tcx();
+ let (le, ge) = tcx.infer_ctxt(None, None,
+ ProjectionMode::Topmost).enter(|infcx| {
+ let overlap = traits::overlapping_impls(&infcx,
+ possible_sibling,
+ impl_def_id);
+ if let Some(impl_header) = overlap {
+ let le = specializes(tcx, impl_def_id, possible_sibling);
+ let ge = specializes(tcx, possible_sibling, impl_def_id);
+
+ if le == ge {
+ // overlap, but no specialization; error out
+ let trait_ref = impl_header.trait_ref.unwrap();
+ Err(OverlapError {
+ with_impl: possible_sibling,
+ trait_desc: trait_ref.to_string(),
+ self_desc: trait_ref.substs.self_ty().and_then(|ty| {
+ // only report the Self type if it has at least
+ // some outer concrete shell; otherwise, it's
+ // not adding much information.
+ if ty.has_concrete_skeleton() {
+ Some(ty.to_string())
+ } else {
+ None
+ }
+ })
+ })
+ } else {
+ Ok((le, ge))
+ }
+ } else {
+ Ok((false, false))
+ }
+ })?;
- if le && !ge {
- debug!("descending as child of TraitRef {:?}",
- tcx.impl_trait_ref(possible_sibling).unwrap());
+ if le && !ge {
+ debug!("descending as child of TraitRef {:?}",
+ tcx.impl_trait_ref(possible_sibling).unwrap());
- // the impl specializes possible_sibling
- return InsertResult::ShouldRecurseOn(possible_sibling);
- } else if ge && !le {
- debug!("placing as parent of TraitRef {:?}",
- tcx.impl_trait_ref(possible_sibling).unwrap());
+ // the impl specializes possible_sibling
+ return Ok(Inserted::ShouldRecurseOn(possible_sibling));
+ } else if ge && !le {
+ debug!("placing as parent of TraitRef {:?}",
+ tcx.impl_trait_ref(possible_sibling).unwrap());
// possible_sibling specializes the impl
*slot = impl_def_id;
- return InsertResult::Replaced(possible_sibling);
- } else {
- // overlap, but no specialization; error out
- return InsertResult::Overlapped(Overlap {
- with_impl: possible_sibling,
- on_trait_ref: impl_header.trait_ref.unwrap(),
- in_context: infcx,
- });
- }
+ return Ok(Inserted::Replaced(possible_sibling));
+ } else {
+ // no overlap (error bailed already via ?)
}
}
// no overlap with any potential siblings, so add as a new sibling
debug!("placing as new sibling");
self.insert_blindly(tcx, impl_def_id);
- InsertResult::BecameNewSibling
+ Ok(Inserted::BecameNewSibling)
}
- fn iter_mut<'a>(&'a mut self) -> Box<Iterator<Item = &'a mut DefId> + 'a> {
+ fn iter_mut(&'a mut self) -> Box<Iterator<Item = &'a mut DefId> + 'a> {
let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter_mut());
Box::new(self.blanket_impls.iter_mut().chain(nonblanket))
}
- fn filtered_mut<'a>(&'a mut self, sty: SimplifiedType)
- -> Box<Iterator<Item = &'a mut DefId> + 'a> {
+ fn filtered_mut(&'a mut self, sty: SimplifiedType)
+ -> Box<Iterator<Item = &'a mut DefId> + 'a> {
let nonblanket = self.nonblanket_impls.entry(sty).or_insert(vec![]).iter_mut();
Box::new(self.blanket_impls.iter_mut().chain(nonblanket))
}
}
-impl Graph {
+impl<'a, 'gcx, 'tcx> Graph {
pub fn new() -> Graph {
Graph {
parent: Default::default(),
/// Insert a local impl into the specialization graph. If an existing impl
/// conflicts with it (has overlap, but neither specializes the other),
/// information about the area of overlap is returned in the `Err`.
- pub fn insert<'a, 'tcx>(&mut self,
- tcx: &'a TyCtxt<'tcx>,
- impl_def_id: DefId)
- -> Result<(), Overlap<'a, 'tcx>> {
+ pub fn insert(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId)
+ -> Result<(), OverlapError> {
assert!(impl_def_id.is_local());
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
// Descend the specialization tree, where `parent` is the current parent node
loop {
- use self::InsertResult::*;
+ use self::Inserted::*;
let insert_result = self.children.entry(parent).or_insert(Children::new())
- .insert(tcx, impl_def_id, simplified);
+ .insert(tcx, impl_def_id, simplified)?;
match insert_result {
BecameNewSibling => {
ShouldRecurseOn(new_parent) => {
parent = new_parent;
}
- Overlapped(error) => {
- return Err(error);
- }
}
}
}
/// Insert cached metadata mapping from a child impl back to its parent.
- pub fn record_impl_from_cstore(&mut self, tcx: &TyCtxt, parent: DefId, child: DefId) {
+ pub fn record_impl_from_cstore(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ parent: DefId,
+ child: DefId) {
if self.parent.insert(child, parent).is_some() {
bug!("When recording an impl from the crate store, information about its parent \
was already present.");
Trait(DefId),
}
-impl Node {
+impl<'a, 'gcx, 'tcx> Node {
pub fn is_from_trait(&self) -> bool {
match *self {
Node::Trait(..) => true,
}
/// Iterate over the items defined directly by the given (impl or trait) node.
- pub fn items<'a, 'tcx>(&self, tcx: &'a TyCtxt<'tcx>) -> NodeItems<'a, 'tcx> {
+ pub fn items(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> NodeItems<'a, 'gcx> {
match *self {
Node::Impl(impl_def_id) => {
NodeItems::Impl {
- tcx: tcx,
+ tcx: tcx.global_tcx(),
items: cell::Ref::map(tcx.impl_items.borrow(),
|impl_items| &impl_items[&impl_def_id]),
idx: 0,
/// An iterator over the items defined within a trait or impl.
pub enum NodeItems<'a, 'tcx: 'a> {
Impl {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
items: cell::Ref<'a, Vec<ty::ImplOrTraitItemId>>,
idx: usize,
},
}
}
-impl<'a, 'tcx> Ancestors<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Ancestors<'a, 'tcx> {
/// Search the items from the given ancestors, returning each type definition
/// with the given name.
- pub fn type_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> TypeDefs<'a, 'tcx> {
+ pub fn type_defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, name: Name) -> TypeDefs<'a, 'gcx> {
let iter = self.flat_map(move |node| {
node.items(tcx)
.filter_map(move |item| {
/// Search the items from the given ancestors, returning each fn definition
/// with the given name.
- pub fn fn_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> FnDefs<'a, 'tcx> {
+ pub fn fn_defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, name: Name) -> FnDefs<'a, 'gcx> {
let iter = self.flat_map(move |node| {
node.items(tcx)
.filter_map(move |item| {
/// Search the items from the given ancestors, returning each const
/// definition with the given name.
- pub fn const_defs(self, tcx: &'a TyCtxt<'tcx>, name: Name) -> ConstDefs<'a, 'tcx> {
+ pub fn const_defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, name: Name) -> ConstDefs<'a, 'gcx> {
let iter = self.flat_map(move |node| {
node.items(tcx)
.filter_map(move |item| {
use traits;
use traits::project::Normalized;
+use ty::{Lift, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use std::fmt;
}
}
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+impl<'a, 'tcx> Lift<'tcx> for traits::SelectionError<'a> {
+ type Lifted = traits::SelectionError<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ super::Unimplemented => Some(super::Unimplemented),
+ super::OutputTypeParameterMismatch(a, b, ref err) => {
+ tcx.lift(&(a, b)).and_then(|(a, b)| {
+ tcx.lift(err).map(|err| {
+ super::OutputTypeParameterMismatch(a, b, err)
+ })
+ })
+ }
+ super::TraitNotObjectSafe(def_id) => {
+ Some(super::TraitNotObjectSafe(def_id))
+ }
+ }
+ }
+}
+
+// For trans only.
+impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> {
+ type Lifted = traits::Vtable<'tcx, ()>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match self.clone() {
+ traits::VtableImpl(traits::VtableImplData {
+ impl_def_id,
+ substs,
+ nested
+ }) => {
+ tcx.lift(&substs).map(|substs| {
+ traits::VtableImpl(traits::VtableImplData {
+ impl_def_id: impl_def_id,
+ substs: substs,
+ nested: nested
+ })
+ })
+ }
+ traits::VtableDefaultImpl(t) => Some(traits::VtableDefaultImpl(t)),
+ traits::VtableClosure(traits::VtableClosureData {
+ closure_def_id,
+ substs,
+ nested
+ }) => {
+ tcx.lift(&substs).map(|substs| {
+ traits::VtableClosure(traits::VtableClosureData {
+ closure_def_id: closure_def_id,
+ substs: substs,
+ nested: nested
+ })
+ })
+ }
+ traits::VtableFnPointer(ty) => {
+ tcx.lift(&ty).map(traits::VtableFnPointer)
+ }
+ traits::VtableParam(n) => Some(traits::VtableParam(n)),
+ traits::VtableBuiltin(d) => Some(traits::VtableBuiltin(d)),
+ traits::VtableObject(traits::VtableObjectData {
+ upcast_trait_ref,
+ vtable_base
+ }) => {
+ tcx.lift(&upcast_trait_ref).map(|trait_ref| {
+ traits::VtableObject(traits::VtableObjectData {
+ upcast_trait_ref: trait_ref,
+ vtable_base: vtable_base
+ })
+ })
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
+
impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O>
{
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::Obligation {
cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
}
impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
- let substs = self.substs.fold_with(folder);
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::VtableImplData {
impl_def_id: self.impl_def_id,
- substs: folder.tcx().mk_substs(substs),
+ substs: self.substs.fold_with(folder),
nested: self.nested.fold_with(folder),
}
}
}
impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::VtableClosureData {
closure_def_id: self.closure_def_id,
substs: self.substs.fold_with(folder),
}
impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableDefaultImplData<N> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::VtableDefaultImplData {
trait_def_id: self.trait_def_id,
nested: self.nested.fold_with(folder),
}
impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData<N> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::VtableBuiltinData {
nested: self.nested.fold_with(folder),
}
}
impl<'tcx> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
traits::VtableObjectData {
upcast_trait_ref: self.upcast_trait_ref.fold_with(folder),
vtable_base: self.vtable_base
}
impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)),
traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)),
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
Normalized {
value: self.value.fold_with(folder),
obligations: self.obligations.fold_with(folder),
use super::{Obligation, ObligationCause, PredicateObligation, SelectionContext, Normalized};
-fn anonymize_predicate<'tcx>(tcx: &TyCtxt<'tcx>, pred: &ty::Predicate<'tcx>)
- -> ty::Predicate<'tcx> {
+fn anonymize_predicate<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ pred: &ty::Predicate<'tcx>)
+ -> ty::Predicate<'tcx> {
match *pred {
ty::Predicate::Trait(ref data) =>
ty::Predicate::Trait(tcx.anonymize_late_bound_regions(data)),
}
-struct PredicateSet<'a,'tcx:'a> {
- tcx: &'a TyCtxt<'tcx>,
+struct PredicateSet<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
set: FnvHashSet<ty::Predicate<'tcx>>,
}
-impl<'a,'tcx> PredicateSet<'a,'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>) -> PredicateSet<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> PredicateSet<'a, 'gcx, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PredicateSet<'a, 'gcx, 'tcx> {
PredicateSet { tcx: tcx, set: FnvHashSet() }
}
/// that `T : PartialOrd` holds as well. Similarly, if we have `trait
/// Foo : 'static`, and we know that `T : Foo`, then we know that `T :
/// 'static`.
-pub struct Elaborator<'cx, 'tcx:'cx> {
- tcx: &'cx TyCtxt<'tcx>,
+pub struct Elaborator<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
stack: Vec<ty::Predicate<'tcx>>,
- visited: PredicateSet<'cx,'tcx>,
+ visited: PredicateSet<'a, 'gcx, 'tcx>,
}
-pub fn elaborate_trait_ref<'cx, 'tcx>(
- tcx: &'cx TyCtxt<'tcx>,
+pub fn elaborate_trait_ref<'cx, 'gcx, 'tcx>(
+ tcx: TyCtxt<'cx, 'gcx, 'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
- -> Elaborator<'cx, 'tcx>
+ -> Elaborator<'cx, 'gcx, 'tcx>
{
elaborate_predicates(tcx, vec![trait_ref.to_predicate()])
}
-pub fn elaborate_trait_refs<'cx, 'tcx>(
- tcx: &'cx TyCtxt<'tcx>,
+pub fn elaborate_trait_refs<'cx, 'gcx, 'tcx>(
+ tcx: TyCtxt<'cx, 'gcx, 'tcx>,
trait_refs: &[ty::PolyTraitRef<'tcx>])
- -> Elaborator<'cx, 'tcx>
+ -> Elaborator<'cx, 'gcx, 'tcx>
{
let predicates = trait_refs.iter()
.map(|trait_ref| trait_ref.to_predicate())
elaborate_predicates(tcx, predicates)
}
-pub fn elaborate_predicates<'cx, 'tcx>(
- tcx: &'cx TyCtxt<'tcx>,
+pub fn elaborate_predicates<'cx, 'gcx, 'tcx>(
+ tcx: TyCtxt<'cx, 'gcx, 'tcx>,
mut predicates: Vec<ty::Predicate<'tcx>>)
- -> Elaborator<'cx, 'tcx>
+ -> Elaborator<'cx, 'gcx, 'tcx>
{
let mut visited = PredicateSet::new(tcx);
predicates.retain(|pred| visited.insert(pred));
- Elaborator { tcx: tcx, stack: predicates, visited: visited }
+ Elaborator { stack: predicates, visited: visited }
}
-impl<'cx, 'tcx> Elaborator<'cx, 'tcx> {
- pub fn filter_to_traits(self) -> FilterToTraits<Elaborator<'cx, 'tcx>> {
+impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> {
+ pub fn filter_to_traits(self) -> FilterToTraits<Self> {
FilterToTraits::new(self)
}
fn push(&mut self, predicate: &ty::Predicate<'tcx>) {
+ let tcx = self.visited.tcx;
match *predicate {
ty::Predicate::Trait(ref data) => {
// Predicates declared on the trait.
- let predicates = self.tcx.lookup_super_predicates(data.def_id());
+ let predicates = tcx.lookup_super_predicates(data.def_id());
let mut predicates: Vec<_> =
predicates.predicates
.iter()
- .map(|p| p.subst_supertrait(self.tcx, &data.to_poly_trait_ref()))
+ .map(|p| p.subst_supertrait(tcx, &data.to_poly_trait_ref()))
.collect();
debug!("super_predicates: data={:?} predicates={:?}",
}
}
-impl<'cx, 'tcx> Iterator for Elaborator<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> Iterator for Elaborator<'cx, 'gcx, 'tcx> {
type Item = ty::Predicate<'tcx>;
fn next(&mut self) -> Option<ty::Predicate<'tcx>> {
// Supertrait iterator
///////////////////////////////////////////////////////////////////////////
-pub type Supertraits<'cx, 'tcx> = FilterToTraits<Elaborator<'cx, 'tcx>>;
+pub type Supertraits<'cx, 'gcx, 'tcx> = FilterToTraits<Elaborator<'cx, 'gcx, 'tcx>>;
-pub fn supertraits<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>)
- -> Supertraits<'cx, 'tcx>
+pub fn supertraits<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>)
+ -> Supertraits<'cx, 'gcx, 'tcx>
{
elaborate_trait_ref(tcx, trait_ref).filter_to_traits()
}
-pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
- bounds: &[ty::PolyTraitRef<'tcx>])
- -> Supertraits<'cx, 'tcx>
+pub fn transitive_bounds<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>,
+ bounds: &[ty::PolyTraitRef<'tcx>])
+ -> Supertraits<'cx, 'gcx, 'tcx>
{
elaborate_trait_refs(tcx, bounds).filter_to_traits()
}
///////////////////////////////////////////////////////////////////////////
// Iterator over def-ids of supertraits
-pub struct SupertraitDefIds<'cx, 'tcx:'cx> {
- tcx: &'cx TyCtxt<'tcx>,
+pub struct SupertraitDefIds<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
stack: Vec<DefId>,
visited: FnvHashSet<DefId>,
}
-pub fn supertrait_def_ids<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
- trait_def_id: DefId)
- -> SupertraitDefIds<'cx, 'tcx>
+pub fn supertrait_def_ids<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>,
+ trait_def_id: DefId)
+ -> SupertraitDefIds<'cx, 'gcx, 'tcx>
{
SupertraitDefIds {
tcx: tcx,
}
}
-impl<'cx, 'tcx> Iterator for SupertraitDefIds<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> Iterator for SupertraitDefIds<'cx, 'gcx, 'tcx> {
type Item = DefId;
fn next(&mut self) -> Option<DefId> {
/// Instantiate all bound parameters of the impl with the given substs,
/// returning the resulting trait ref and all obligations that arise.
/// The obligations are closed under normalization.
-pub fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
- impl_def_id: DefId,
- impl_substs: &Substs<'tcx>)
- -> (ty::TraitRef<'tcx>,
- Vec<PredicateObligation<'tcx>>)
+pub fn impl_trait_ref_and_oblig<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId,
+ impl_substs: &Substs<'tcx>)
+ -> (ty::TraitRef<'tcx>,
+ Vec<PredicateObligation<'tcx>>)
{
let impl_trait_ref =
selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
// declared on the impl declaration e.g., `impl<A,B> for Box<[(A,B)]>`
// would return ($0, $1) where $0 and $1 are freshly instantiated type
// variables.
-pub fn fresh_type_vars_for_impl<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- span: Span,
- impl_def_id: DefId)
- -> Substs<'tcx>
+pub fn fresh_type_vars_for_impl<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ span: Span,
+ impl_def_id: DefId)
+ -> &'tcx Substs<'tcx>
{
let tcx = infcx.tcx;
let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
}).collect()
}
-pub fn trait_ref_for_builtin_bound<'tcx>(
- tcx: &TyCtxt<'tcx>,
- builtin_bound: ty::BuiltinBound,
- param_ty: Ty<'tcx>)
- -> Result<ty::TraitRef<'tcx>, ErrorReported>
-{
- match tcx.lang_items.from_builtin_kind(builtin_bound) {
- Ok(def_id) => {
- Ok(ty::TraitRef {
- def_id: def_id,
- substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
- })
- }
- Err(e) => {
- tcx.sess.err(&e);
- Err(ErrorReported)
- }
- }
-}
-
pub fn predicate_for_trait_ref<'tcx>(
cause: ObligationCause<'tcx>,
trait_ref: ty::TraitRef<'tcx>,
}
}
-pub fn predicate_for_trait_def<'tcx>(
- tcx: &TyCtxt<'tcx>,
- cause: ObligationCause<'tcx>,
- trait_def_id: DefId,
- recursion_depth: usize,
- param_ty: Ty<'tcx>,
- ty_params: Vec<Ty<'tcx>>)
- -> PredicateObligation<'tcx>
-{
- let trait_ref = ty::TraitRef {
- def_id: trait_def_id,
- substs: tcx.mk_substs(Substs::new_trait(ty_params, vec![], param_ty))
- };
- predicate_for_trait_ref(cause, trait_ref, recursion_depth)
-}
-
-pub fn predicate_for_builtin_bound<'tcx>(
- tcx: &TyCtxt<'tcx>,
- cause: ObligationCause<'tcx>,
- builtin_bound: ty::BuiltinBound,
- recursion_depth: usize,
- param_ty: Ty<'tcx>)
- -> Result<PredicateObligation<'tcx>, ErrorReported>
-{
- let trait_ref = trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty)?;
- Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth))
-}
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn trait_ref_for_builtin_bound(self,
+ builtin_bound: ty::BuiltinBound,
+ param_ty: Ty<'tcx>)
+ -> Result<ty::TraitRef<'tcx>, ErrorReported>
+ {
+ match self.lang_items.from_builtin_kind(builtin_bound) {
+ Ok(def_id) => {
+ Ok(ty::TraitRef {
+ def_id: def_id,
+ substs: self.mk_substs(Substs::empty().with_self_ty(param_ty))
+ })
+ }
+ Err(e) => {
+ self.sess.err(&e);
+ Err(ErrorReported)
+ }
+ }
+ }
-/// Cast a trait reference into a reference to one of its super
-/// traits; returns `None` if `target_trait_def_id` is not a
-/// supertrait.
-pub fn upcast<'tcx>(tcx: &TyCtxt<'tcx>,
- source_trait_ref: ty::PolyTraitRef<'tcx>,
- target_trait_def_id: DefId)
- -> Vec<ty::PolyTraitRef<'tcx>>
-{
- if source_trait_ref.def_id() == target_trait_def_id {
- return vec![source_trait_ref]; // shorcut the most common case
+ pub fn predicate_for_trait_def(self,
+ cause: ObligationCause<'tcx>,
+ trait_def_id: DefId,
+ recursion_depth: usize,
+ param_ty: Ty<'tcx>,
+ ty_params: Vec<Ty<'tcx>>)
+ -> PredicateObligation<'tcx>
+ {
+ let trait_ref = ty::TraitRef {
+ def_id: trait_def_id,
+ substs: self.mk_substs(Substs::new_trait(ty_params, vec![], param_ty))
+ };
+ predicate_for_trait_ref(cause, trait_ref, recursion_depth)
}
- supertraits(tcx, source_trait_ref)
- .filter(|r| r.def_id() == target_trait_def_id)
- .collect()
-}
+ pub fn predicate_for_builtin_bound(self,
+ cause: ObligationCause<'tcx>,
+ builtin_bound: ty::BuiltinBound,
+ recursion_depth: usize,
+ param_ty: Ty<'tcx>)
+ -> Result<PredicateObligation<'tcx>, ErrorReported>
+ {
+ let trait_ref = self.trait_ref_for_builtin_bound(builtin_bound, param_ty)?;
+ Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth))
+ }
-/// Given a trait `trait_ref`, returns the number of vtable entries
-/// that come from `trait_ref`, excluding its supertraits. Used in
-/// computing the vtable base for an upcast trait of a trait object.
-pub fn count_own_vtable_entries<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>)
- -> usize {
- let mut entries = 0;
- // Count number of methods and add them to the total offset.
- // Skip over associated types and constants.
- for trait_item in &tcx.trait_items(trait_ref.def_id())[..] {
- if let ty::MethodTraitItem(_) = *trait_item {
- entries += 1;
+    /// Cast a trait reference into a reference to one of its super
+    /// traits; returns an empty vector if `target_trait_def_id` is not a
+    /// supertrait.
+ pub fn upcast_choices(self,
+ source_trait_ref: ty::PolyTraitRef<'tcx>,
+ target_trait_def_id: DefId)
+ -> Vec<ty::PolyTraitRef<'tcx>>
+ {
+ if source_trait_ref.def_id() == target_trait_def_id {
+            return vec![source_trait_ref]; // shortcut the most common case
}
+
+ supertraits(self, source_trait_ref)
+ .filter(|r| r.def_id() == target_trait_def_id)
+ .collect()
}
- entries
-}
-/// Given an upcast trait object described by `object`, returns the
-/// index of the method `method_def_id` (which should be part of
-/// `object.upcast_trait_ref`) within the vtable for `object`.
-pub fn get_vtable_index_of_object_method<'tcx>(tcx: &TyCtxt<'tcx>,
- object: &super::VtableObjectData<'tcx>,
- method_def_id: DefId) -> usize {
- // Count number of methods preceding the one we are selecting and
- // add them to the total offset.
- // Skip over associated types and constants.
- let mut entries = object.vtable_base;
- for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] {
- if trait_item.def_id() == method_def_id {
- // The item with the ID we were given really ought to be a method.
- assert!(match *trait_item {
- ty::MethodTraitItem(_) => true,
- _ => false
- });
-
- return entries;
+ /// Given a trait `trait_ref`, returns the number of vtable entries
+ /// that come from `trait_ref`, excluding its supertraits. Used in
+ /// computing the vtable base for an upcast trait of a trait object.
+ pub fn count_own_vtable_entries(self, trait_ref: ty::PolyTraitRef<'tcx>) -> usize {
+ let mut entries = 0;
+ // Count number of methods and add them to the total offset.
+ // Skip over associated types and constants.
+ for trait_item in &self.trait_items(trait_ref.def_id())[..] {
+ if let ty::MethodTraitItem(_) = *trait_item {
+ entries += 1;
+ }
}
- if let ty::MethodTraitItem(_) = *trait_item {
- entries += 1;
+ entries
+ }
+
+ /// Given an upcast trait object described by `object`, returns the
+ /// index of the method `method_def_id` (which should be part of
+ /// `object.upcast_trait_ref`) within the vtable for `object`.
+ pub fn get_vtable_index_of_object_method(self,
+ object: &super::VtableObjectData<'tcx>,
+ method_def_id: DefId) -> usize {
+ // Count number of methods preceding the one we are selecting and
+ // add them to the total offset.
+ // Skip over associated types and constants.
+ let mut entries = object.vtable_base;
+ for trait_item in &self.trait_items(object.upcast_trait_ref.def_id())[..] {
+ if trait_item.def_id() == method_def_id {
+ // The item with the ID we were given really ought to be a method.
+ assert!(match *trait_item {
+ ty::MethodTraitItem(_) => true,
+ _ => false
+ });
+
+ return entries;
+ }
+ if let ty::MethodTraitItem(_) = *trait_item {
+ entries += 1;
+ }
}
+
+ bug!("get_vtable_index_of_object_method: {:?} was not found",
+ method_def_id);
}
- bug!("get_vtable_index_of_object_method: {:?} was not found",
- method_def_id);
+ pub fn closure_trait_ref_and_return_type(self,
+ fn_trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ sig: &ty::PolyFnSig<'tcx>,
+ tuple_arguments: TupleArgumentsFlag)
+ -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
+ {
+ let arguments_tuple = match tuple_arguments {
+ TupleArgumentsFlag::No => sig.0.inputs[0],
+ TupleArgumentsFlag::Yes => self.mk_tup(sig.0.inputs.to_vec()),
+ };
+ let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
+ let trait_ref = ty::TraitRef {
+ def_id: fn_trait_def_id,
+ substs: self.mk_substs(trait_substs),
+ };
+ ty::Binder((trait_ref, sig.0.output.unwrap_or(self.mk_nil())))
+ }
}
pub enum TupleArgumentsFlag { Yes, No }
-
-pub fn closure_trait_ref_and_return_type<'tcx>(
- tcx: &TyCtxt<'tcx>,
- fn_trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- sig: &ty::PolyFnSig<'tcx>,
- tuple_arguments: TupleArgumentsFlag)
- -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
-{
- let arguments_tuple = match tuple_arguments {
- TupleArgumentsFlag::No => sig.0.inputs[0],
- TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()),
- };
- let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
- let trait_ref = ty::TraitRef {
- def_id: fn_trait_def_id,
- substs: tcx.mk_substs(trait_substs),
- };
- ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil())))
-}
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
-pub struct Match<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>
+pub struct Match<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Match<'a, 'tcx> {
- pub fn new(tcx: &'a TyCtxt<'tcx>) -> Match<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Match<'a, 'gcx, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Match<'a, 'gcx, 'tcx> {
Match { tcx: tcx }
}
}
-impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Match<'a, 'gcx, 'tcx> {
fn tag(&self) -> &'static str { "Match" }
- fn tcx(&self) -> &'a TyCtxt<'tcx> { self.tcx }
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- _: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ _: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a,'tcx>
+ where T: Relate<'tcx>
{
Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?))
}
Struct(usize)
}
-impl<'tcx> ty::TyS<'tcx> {
+impl<'a, 'gcx, 'tcx> ty::TyS<'tcx> {
/// See `expr_ty_adjusted`
- pub fn adjust<F>(&'tcx self, cx: &TyCtxt<'tcx>,
+ pub fn adjust<F>(&'tcx self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
span: Span,
expr_id: ast::NodeId,
adjustment: Option<&AutoAdjustment<'tcx>>,
match *adjustment {
AdjustReifyFnPointer => {
match self.sty {
- ty::TyFnDef(_, _, b) => {
- cx.mk_ty(ty::TyFnPtr(b))
- }
+ ty::TyFnDef(_, _, f) => tcx.mk_fn_ptr(f),
_ => {
bug!("AdjustReifyFnPointer adjustment on non-fn-item: {:?}",
self);
AdjustUnsafeFnPointer => {
match self.sty {
- ty::TyFnPtr(b) => cx.safe_to_unsafe_fn_ty(b),
+ ty::TyFnPtr(b) => tcx.safe_to_unsafe_fn_ty(b),
ref b => {
bug!("AdjustUnsafeFnPointer adjustment on non-fn-ptr: {:?}",
b);
AdjustMutToConstPointer => {
match self.sty {
- ty::TyRawPtr(mt) => cx.mk_ptr(ty::TypeAndMut {
+ ty::TyRawPtr(mt) => tcx.mk_ptr(ty::TypeAndMut {
ty: mt.ty,
mutbl: hir::MutImmutable
}),
if !adjusted_ty.references_error() {
for i in 0..adj.autoderefs {
adjusted_ty =
- adjusted_ty.adjust_for_autoderef(cx,
+ adjusted_ty.adjust_for_autoderef(tcx,
expr_id,
span,
i as u32,
if let Some(target) = adj.unsize {
target
} else {
- adjusted_ty.adjust_for_autoref(cx, adj.autoref)
+ adjusted_ty.adjust_for_autoref(tcx, adj.autoref)
}
}
}
}
pub fn adjust_for_autoderef<F>(&'tcx self,
- cx: &TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
expr_id: ast::NodeId,
expr_span: Span,
autoderef: u32, // how many autoderefs so far?
if let Some(method_ty) = method_type(method_call) {
// Method calls always have all late-bound regions
// fully instantiated.
- let fn_ret = cx.no_late_bound_regions(&method_ty.fn_ret()).unwrap();
+ let fn_ret = tcx.no_late_bound_regions(&method_ty.fn_ret()).unwrap();
adjusted_ty = fn_ret.unwrap();
}
match adjusted_ty.builtin_deref(true, NoPreference) {
}
}
- pub fn adjust_for_autoref(&'tcx self, cx: &TyCtxt<'tcx>,
+ pub fn adjust_for_autoref(&'tcx self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
autoref: Option<AutoRef<'tcx>>)
-> Ty<'tcx> {
match autoref {
None => self,
Some(AutoPtr(r, m)) => {
- cx.mk_ref(r, TypeAndMut { ty: self, mutbl: m })
+ tcx.mk_ref(r, TypeAndMut { ty: self, mutbl: m })
}
Some(AutoUnsafe(m)) => {
- cx.mk_ptr(TypeAndMut { ty: self, mutbl: m })
+ tcx.mk_ptr(TypeAndMut { ty: self, mutbl: m })
}
}
}
self.intersects(TC::InteriorUnsafe)
}
- pub fn needs_drop(&self, _: &TyCtxt) -> bool {
+ pub fn needs_drop(&self, _: TyCtxt) -> bool {
self.intersects(TC::NeedsDrop)
}
}
}
-impl<'tcx> ty::TyS<'tcx> {
- pub fn type_contents(&'tcx self, cx: &TyCtxt<'tcx>) -> TypeContents {
- return cx.tc_cache.memoize(self, || tc_ty(cx, self, &mut FnvHashMap()));
+impl<'a, 'tcx> ty::TyS<'tcx> {
+ pub fn type_contents(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> TypeContents {
+ return tcx.tc_cache.memoize(self, || tc_ty(tcx, self, &mut FnvHashMap()));
- fn tc_ty<'tcx>(cx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
+ fn tc_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ cache: &mut FnvHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
{
- // Subtle: Note that we are *not* using cx.tc_cache here but rather a
+ // Subtle: Note that we are *not* using tcx.tc_cache here but rather a
// private cache for this walk. This is needed in the case of cyclic
// types like:
//
// The problem is, as we are doing the computation, we will also
// compute an *intermediate* contents for, e.g., Option<List> of
// TC::None. This is ok during the computation of List itself, but if
- // we stored this intermediate value into cx.tc_cache, then later
+ // we stored this intermediate value into tcx.tc_cache, then later
// requests for the contents of Option<List> would also yield TC::None
// which is incorrect. This value was computed based on the crutch
// value for the type contents of list. The correct value is
Some(tc) => { return *tc; }
None => {}
}
- match cx.tc_cache.borrow().get(&ty) { // Must check both caches!
+ match tcx.tc_cache.borrow().get(&ty) { // Must check both caches!
Some(tc) => { return *tc; }
None => {}
}
}
ty::TyBox(typ) => {
- tc_ty(cx, typ, cache).owned_pointer()
+ tc_ty(tcx, typ, cache).owned_pointer()
}
ty::TyTrait(_) => {
}
ty::TyArray(ty, _) => {
- tc_ty(cx, ty, cache)
+ tc_ty(tcx, ty, cache)
}
ty::TySlice(ty) => {
- tc_ty(cx, ty, cache)
+ tc_ty(tcx, ty, cache)
}
ty::TyStr => TC::None,
ty::TyClosure(_, ref substs) => {
- TypeContents::union(&substs.upvar_tys, |ty| tc_ty(cx, &ty, cache))
+ TypeContents::union(&substs.upvar_tys, |ty| tc_ty(tcx, &ty, cache))
}
ty::TyTuple(ref tys) => {
TypeContents::union(&tys[..],
- |ty| tc_ty(cx, *ty, cache))
+ |ty| tc_ty(tcx, *ty, cache))
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
let mut res =
TypeContents::union(&def.variants, |v| {
TypeContents::union(&v.fields, |f| {
- tc_ty(cx, f.ty(cx, substs), cache)
+ tc_ty(tcx, f.ty(tcx, substs), cache)
})
});
res = res | TC::OwnsDtor;
}
- apply_lang_items(cx, def.did, res)
+ apply_lang_items(tcx, def.did, res)
}
ty::TyProjection(..) |
result
}
- fn apply_lang_items(cx: &TyCtxt, did: DefId, tc: TypeContents)
- -> TypeContents {
- if Some(did) == cx.lang_items.unsafe_cell_type() {
+ fn apply_lang_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId, tc: TypeContents)
+ -> TypeContents {
+ if Some(did) == tcx.lang_items.unsafe_cell_type() {
tc | TC::InteriorUnsafe
} else {
tc
use middle::region::RegionMaps;
use middle::resolve_lifetime;
use middle::stability;
-use ty::subst::{self, Subst, Substs};
+use ty::subst::{self, Substs};
use traits;
use ty::{self, TraitRef, Ty, TypeAndMut};
use ty::{TyS, TypeVariants};
use ty::maps;
use util::common::MemoizationMap;
use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet};
-use util::nodemap::FnvHashMap;
+use util::nodemap::{FnvHashMap, FnvHashSet};
use arena::TypedArena;
use std::borrow::Borrow;
use std::cell::{Cell, RefCell, Ref};
use std::hash::{Hash, Hasher};
+use std::mem;
+use std::ops::Deref;
use std::rc::Rc;
use syntax::ast::{self, Name, NodeId};
use syntax::attr;
pub struct CtxtArenas<'tcx> {
// internings
type_: TypedArena<TyS<'tcx>>,
+ type_list: TypedArena<Vec<Ty<'tcx>>>,
substs: TypedArena<Substs<'tcx>>,
bare_fn: TypedArena<BareFnTy<'tcx>>,
region: TypedArena<Region>,
pub fn new() -> CtxtArenas<'tcx> {
CtxtArenas {
type_: TypedArena::new(),
+ type_list: TypedArena::new(),
substs: TypedArena::new(),
bare_fn: TypedArena::new(),
region: TypedArena::new(),
}
}
+pub struct CtxtInterners<'tcx> {
+ /// The arenas that types etc are allocated from.
+ arenas: &'tcx CtxtArenas<'tcx>,
+
+ /// Specifically use a speedy hash algorithm for these hash sets,
+ /// they're accessed quite often.
+ type_: RefCell<FnvHashSet<Interned<'tcx, TyS<'tcx>>>>,
+ type_list: RefCell<FnvHashSet<Interned<'tcx, [Ty<'tcx>]>>>,
+ substs: RefCell<FnvHashSet<Interned<'tcx, Substs<'tcx>>>>,
+ bare_fn: RefCell<FnvHashSet<Interned<'tcx, BareFnTy<'tcx>>>>,
+ region: RefCell<FnvHashSet<Interned<'tcx, Region>>>,
+ stability: RefCell<FnvHashSet<&'tcx attr::Stability>>,
+ layout: RefCell<FnvHashSet<&'tcx Layout>>,
+}
+
+impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> {
+ fn new(arenas: &'tcx CtxtArenas<'tcx>) -> CtxtInterners<'tcx> {
+ CtxtInterners {
+ arenas: arenas,
+ type_: RefCell::new(FnvHashSet()),
+ type_list: RefCell::new(FnvHashSet()),
+ substs: RefCell::new(FnvHashSet()),
+ bare_fn: RefCell::new(FnvHashSet()),
+ region: RefCell::new(FnvHashSet()),
+ stability: RefCell::new(FnvHashSet()),
+ layout: RefCell::new(FnvHashSet())
+ }
+ }
+
+ /// Intern a type. global_interners is Some only if this is
+ /// a local interner and global_interners is its counterpart.
+ fn intern_ty(&self, st: TypeVariants<'tcx>,
+ global_interners: Option<&CtxtInterners<'gcx>>)
+ -> Ty<'tcx> {
+ let ty = {
+ let mut interner = self.type_.borrow_mut();
+ let global_interner = global_interners.map(|interners| {
+ interners.type_.borrow_mut()
+ });
+ if let Some(&Interned(ty)) = interner.get(&st) {
+ return ty;
+ }
+ if let Some(ref interner) = global_interner {
+ if let Some(&Interned(ty)) = interner.get(&st) {
+ return ty;
+ }
+ }
+
+ let flags = super::flags::FlagComputation::for_sty(&st);
+ let ty_struct = TyS {
+ sty: st,
+ flags: Cell::new(flags.flags),
+ region_depth: flags.depth,
+ };
+
+ // HACK(eddyb) Depend on flags being accurate to
+ // determine that all contents are in the global tcx.
+ // See comments on Lift for why we can't use that.
+ if !flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) {
+ if let Some(interner) = global_interners {
+ let ty_struct: TyS<'gcx> = unsafe {
+ mem::transmute(ty_struct)
+ };
+ let ty: Ty<'gcx> = interner.arenas.type_.alloc(ty_struct);
+ global_interner.unwrap().insert(Interned(ty));
+ return ty;
+ }
+ } else {
+ // Make sure we don't end up with inference
+ // types/regions in the global tcx.
+ if global_interners.is_none() {
+ drop(interner);
+ bug!("Attempted to intern `{:?}` which contains \
+ inference types/regions in the global type context",
+ &ty_struct);
+ }
+ }
+
+ // Don't be &mut TyS.
+ let ty: Ty<'tcx> = self.arenas.type_.alloc(ty_struct);
+ interner.insert(Interned(ty));
+ ty
+ };
+
+ debug!("Interned type: {:?} Pointer: {:?}",
+ ty, ty as *const TyS);
+ ty
+ }
+
+}
+
pub struct CommonTypes<'tcx> {
pub bool: Ty<'tcx>,
pub char: Ty<'tcx>,
pub fru_field_types: NodeMap<Vec<Ty<'tcx>>>
}
-impl<'tcx> Tables<'tcx> {
+impl<'a, 'gcx, 'tcx> Tables<'tcx> {
pub fn empty() -> Tables<'tcx> {
Tables {
node_types: FnvHashMap(),
fru_field_types: NodeMap()
}
}
-
- pub fn closure_kind(this: &RefCell<Self>,
- tcx: &TyCtxt<'tcx>,
- def_id: DefId)
- -> ty::ClosureKind {
- // If this is a local def-id, it should be inserted into the
- // tables by typeck; else, it will be retreived from
- // the external crate metadata.
- if let Some(&kind) = this.borrow().closure_kinds.get(&def_id) {
- return kind;
- }
-
- let kind = tcx.sess.cstore.closure_kind(tcx, def_id);
- this.borrow_mut().closure_kinds.insert(def_id, kind);
- kind
- }
-
- pub fn closure_type(this: &RefCell<Self>,
- tcx: &TyCtxt<'tcx>,
- def_id: DefId,
- substs: &ClosureSubsts<'tcx>)
- -> ty::ClosureTy<'tcx>
- {
- // If this is a local def-id, it should be inserted into the
- // tables by typeck; else, it will be retreived from
- // the external crate metadata.
- if let Some(ty) = this.borrow().closure_tys.get(&def_id) {
- return ty.subst(tcx, &substs.func_substs);
- }
-
- let ty = tcx.sess.cstore.closure_ty(tcx, def_id);
- this.borrow_mut().closure_tys.insert(def_id, ty.clone());
- ty.subst(tcx, &substs.func_substs)
- }
}
impl<'tcx> CommonTypes<'tcx> {
- fn new(arena: &'tcx TypedArena<TyS<'tcx>>,
- interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>)
- -> CommonTypes<'tcx>
- {
- let mk = |sty| TyCtxt::intern_ty(arena, interner, sty);
+ fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
+ let mk = |sty| interners.intern_ty(sty, None);
CommonTypes {
bool: mk(TyBool),
char: mk(TyChar),
/// The data structure to keep track of all the information that typechecker
/// generates so that so that it can be reused and doesn't have to be redone
/// later on.
-pub struct TyCtxt<'tcx> {
- /// The arenas that types etc are allocated from.
- arenas: &'tcx CtxtArenas<'tcx>,
+#[derive(Copy, Clone)]
+pub struct TyCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ gcx: &'a GlobalCtxt<'gcx>,
+ interners: &'a CtxtInterners<'tcx>
+}
- /// Specifically use a speedy hash algorithm for this hash map, it's used
- /// quite often.
- // FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
- // queried from a HashSet.
- interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> {
+ type Target = &'a GlobalCtxt<'gcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.gcx
+ }
+}
- // FIXME as above, use a hashset if equivalent elements can be queried.
- substs_interner: RefCell<FnvHashMap<&'tcx Substs<'tcx>, &'tcx Substs<'tcx>>>,
- bare_fn_interner: RefCell<FnvHashMap<&'tcx BareFnTy<'tcx>, &'tcx BareFnTy<'tcx>>>,
- region_interner: RefCell<FnvHashMap<&'tcx Region, &'tcx Region>>,
- stability_interner: RefCell<FnvHashMap<&'tcx attr::Stability, &'tcx attr::Stability>>,
- layout_interner: RefCell<FnvHashMap<&'tcx Layout, &'tcx Layout>>,
+pub struct GlobalCtxt<'tcx> {
+ global_interners: CtxtInterners<'tcx>,
pub dep_graph: DepGraph,
// Cache for the type-contents routine. FIXME -- track deps?
pub tc_cache: RefCell<FnvHashMap<Ty<'tcx>, ty::contents::TypeContents>>,
- // Cache for various types within a method body and so forth.
- //
- // FIXME this should be made local to typeck, but it is currently used by one lint
- pub ast_ty_to_ty_cache: RefCell<NodeMap<Ty<'tcx>>>,
-
// FIXME no dep tracking, but we should be able to remove this
pub ty_param_defs: RefCell<NodeMap<ty::TypeParameterDef<'tcx>>>,
pub layout_cache: RefCell<FnvHashMap<Ty<'tcx>, &'tcx Layout>>,
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn crate_name(&self, cnum: ast::CrateNum) -> token::InternedString {
+impl<'tcx> GlobalCtxt<'tcx> {
+ /// Get the global TyCtxt.
+ pub fn global_tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
+ TyCtxt {
+ gcx: self,
+ interners: &self.global_interners
+ }
+ }
+}
+
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn crate_name(self, cnum: ast::CrateNum) -> token::InternedString {
if cnum == LOCAL_CRATE {
self.crate_name.clone()
} else {
}
}
- pub fn crate_disambiguator(&self, cnum: ast::CrateNum) -> token::InternedString {
+ pub fn crate_disambiguator(self, cnum: ast::CrateNum) -> token::InternedString {
if cnum == LOCAL_CRATE {
self.sess.crate_disambiguator.get().as_str()
} else {
}
}
- pub fn type_parameter_def(&self,
+ pub fn type_parameter_def(self,
node_id: NodeId)
-> ty::TypeParameterDef<'tcx>
{
self.ty_param_defs.borrow().get(&node_id).unwrap().clone()
}
- pub fn node_types(&self) -> Ref<NodeMap<Ty<'tcx>>> {
+ pub fn node_types(self) -> Ref<'a, NodeMap<Ty<'tcx>>> {
fn projection<'a, 'tcx>(tables: &'a Tables<'tcx>) -> &'a NodeMap<Ty<'tcx>> {
&tables.node_types
}
Ref::map(self.tables.borrow(), projection)
}
- pub fn node_type_insert(&self, id: NodeId, ty: Ty<'tcx>) {
+ pub fn node_type_insert(self, id: NodeId, ty: Ty<'gcx>) {
self.tables.borrow_mut().node_types.insert(id, ty);
}
- pub fn intern_trait_def(&self, def: ty::TraitDef<'tcx>)
- -> &'tcx ty::TraitDef<'tcx> {
+ pub fn intern_trait_def(self, def: ty::TraitDef<'gcx>)
+ -> &'gcx ty::TraitDef<'gcx> {
let did = def.trait_ref.def_id;
- let interned = self.arenas.trait_defs.alloc(def);
+ let interned = self.global_interners.arenas.trait_defs.alloc(def);
if let Some(prev) = self.trait_defs.borrow_mut().insert(did, interned) {
bug!("Tried to overwrite interned TraitDef: {:?}", prev)
}
interned
}
- pub fn alloc_trait_def(&self, def: ty::TraitDef<'tcx>)
- -> &'tcx ty::TraitDef<'tcx> {
- self.arenas.trait_defs.alloc(def)
+ pub fn alloc_trait_def(self, def: ty::TraitDef<'gcx>)
+ -> &'gcx ty::TraitDef<'gcx> {
+ self.global_interners.arenas.trait_defs.alloc(def)
}
- pub fn intern_adt_def(&self,
+ pub fn intern_adt_def(self,
did: DefId,
kind: ty::AdtKind,
- variants: Vec<ty::VariantDefData<'tcx, 'tcx>>)
- -> ty::AdtDefMaster<'tcx> {
+ variants: Vec<ty::VariantDefData<'gcx, 'gcx>>)
+ -> ty::AdtDefMaster<'gcx> {
let def = ty::AdtDefData::new(self, did, kind, variants);
- let interned = self.arenas.adt_defs.alloc(def);
+ let interned = self.global_interners.arenas.adt_defs.alloc(def);
// this will need a transmute when reverse-variance is removed
if let Some(prev) = self.adt_defs.borrow_mut().insert(did, interned) {
bug!("Tried to overwrite interned AdtDef: {:?}", prev)
interned
}
- pub fn intern_stability(&self, stab: attr::Stability) -> &'tcx attr::Stability {
- if let Some(st) = self.stability_interner.borrow().get(&stab) {
+ pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability {
+ if let Some(st) = self.global_interners.stability.borrow().get(&stab) {
return st;
}
- let interned = self.arenas.stability.alloc(stab);
- if let Some(prev) = self.stability_interner
+ let interned = self.global_interners.arenas.stability.alloc(stab);
+ if let Some(prev) = self.global_interners.stability
.borrow_mut()
- .insert(interned, interned) {
+ .replace(interned) {
bug!("Tried to overwrite interned Stability: {:?}", prev)
}
interned
}
- pub fn intern_layout(&self, layout: Layout) -> &'tcx Layout {
- if let Some(layout) = self.layout_interner.borrow().get(&layout) {
+ pub fn intern_layout(self, layout: Layout) -> &'gcx Layout {
+ if let Some(layout) = self.global_interners.layout.borrow().get(&layout) {
return layout;
}
- let interned = self.arenas.layout.alloc(layout);
- if let Some(prev) = self.layout_interner
+ let interned = self.global_interners.arenas.layout.alloc(layout);
+ if let Some(prev) = self.global_interners.layout
.borrow_mut()
- .insert(interned, interned) {
+ .replace(interned) {
bug!("Tried to overwrite interned Layout: {:?}", prev)
}
interned
}
- pub fn store_free_region_map(&self, id: NodeId, map: FreeRegionMap) {
+ pub fn store_free_region_map(self, id: NodeId, map: FreeRegionMap) {
if self.free_region_maps.borrow_mut().insert(id, map).is_some() {
bug!("Tried to overwrite interned FreeRegionMap for NodeId {:?}", id)
}
}
- pub fn free_region_map(&self, id: NodeId) -> FreeRegionMap {
+ pub fn free_region_map(self, id: NodeId) -> FreeRegionMap {
self.free_region_maps.borrow()[&id].clone()
}
- pub fn lift<T: ?Sized + Lift<'tcx>>(&self, value: &T) -> Option<T::Lifted> {
+ pub fn lift<T: ?Sized + Lift<'tcx>>(self, value: &T) -> Option<T::Lifted> {
value.lift_to_tcx(self)
}
- /// Create a type context and call the closure with a `&TyCtxt` reference
+ /// Like lift, but only tries in the global tcx.
+ pub fn lift_to_global<T: ?Sized + Lift<'gcx>>(self, value: &T) -> Option<T::Lifted> {
+ value.lift_to_tcx(self.global_tcx())
+ }
+
+ /// Returns true if self is the same as self.global_tcx().
+ fn is_global(self) -> bool {
+ let local = self.interners as *const _;
+ let global = &self.global_interners as *const _;
+ local as usize == global as usize
+ }
+
+ /// Create a type context and call the closure with a `TyCtxt` reference
/// to the context. The closure enforces that the type context and any interned
/// value (types, substs, etc.) can only be used while `ty::tls` has a valid
/// reference to the context, to allow formatting values that need it.
pub fn create_and_enter<F, R>(s: &'tcx Session,
- arenas: &'tcx CtxtArenas<'tcx>,
- def_map: RefCell<DefMap>,
- named_region_map: resolve_lifetime::NamedRegionMap,
- map: ast_map::Map<'tcx>,
- freevars: FreevarMap,
+ arenas: &'tcx CtxtArenas<'tcx>,
+ def_map: RefCell<DefMap>,
+ named_region_map: resolve_lifetime::NamedRegionMap,
+ map: ast_map::Map<'tcx>,
+ freevars: FreevarMap,
maybe_unused_trait_imports: NodeSet,
- region_maps: RegionMaps,
- lang_items: middle::lang_items::LanguageItems,
- stability: stability::Index<'tcx>,
+ region_maps: RegionMaps,
+ lang_items: middle::lang_items::LanguageItems,
+ stability: stability::Index<'tcx>,
crate_name: &str,
- f: F) -> R
- where F: FnOnce(&TyCtxt<'tcx>) -> R
+ f: F) -> R
+ where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R
{
let data_layout = TargetDataLayout::parse(s);
- let interner = RefCell::new(FnvHashMap());
- let common_types = CommonTypes::new(&arenas.type_, &interner);
+ let interners = CtxtInterners::new(arenas);
+ let common_types = CommonTypes::new(&interners);
let dep_graph = map.dep_graph.clone();
let fulfilled_predicates = traits::GlobalFulfilledPredicates::new(dep_graph.clone());
- tls::enter(TyCtxt {
- arenas: arenas,
- interner: interner,
- substs_interner: RefCell::new(FnvHashMap()),
- bare_fn_interner: RefCell::new(FnvHashMap()),
- region_interner: RefCell::new(FnvHashMap()),
- stability_interner: RefCell::new(FnvHashMap()),
- layout_interner: RefCell::new(FnvHashMap()),
+ tls::enter_global(GlobalCtxt {
+ global_interners: interners,
dep_graph: dep_graph.clone(),
types: common_types,
named_region_map: named_region_map,
tcache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
rcache: RefCell::new(FnvHashMap()),
tc_cache: RefCell::new(FnvHashMap()),
- ast_ty_to_ty_cache: RefCell::new(NodeMap()),
impl_or_trait_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
trait_items_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
}
}
+impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> {
+ /// Call the closure with a local `TyCtxt` using the given arenas.
+ pub fn enter_local<F, R>(&self, arenas: &'tcx CtxtArenas<'tcx>, f: F) -> R
+ where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
+ {
+ let interners = CtxtInterners::new(arenas);
+ tls::enter(self, &interners, f)
+ }
+}
+
/// A trait implemented for all X<'a> types which can be safely and
/// efficiently converted to X<'tcx> as long as they are part of the
/// provided TyCtxt<'tcx>.
/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx>
/// by looking them up in their respective interners.
+///
+/// However, this is still not the best implementation as it does
+/// need to compare the components, even for interned values.
+/// It would be more efficient if TypedArena provided a way to
+/// determine whether the address is in the allocated range.
+///
/// None is returned if the value or one of the components is not part
/// of the provided context.
/// For Ty, None can be returned if either the type interner doesn't
/// e.g. `()` or `u8`, was interned in a different context.
pub trait Lift<'tcx> {
type Lifted;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted>;
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted>;
}
impl<'a, 'tcx> Lift<'tcx> for Ty<'a> {
type Lifted = Ty<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Ty<'tcx>> {
- if let Some(&ty) = tcx.interner.borrow().get(&self.sty) {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
+ if let Some(&Interned(ty)) = tcx.interners.type_.borrow().get(&self.sty) {
if *self as *const _ == ty as *const _ {
return Some(ty);
}
}
- None
+ // Also try in the global tcx if we're not that.
+ if !tcx.is_global() {
+ self.lift_to_tcx(tcx.global_tcx())
+ } else {
+ None
+ }
}
}
impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> {
type Lifted = &'tcx Substs<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<&'tcx Substs<'tcx>> {
- if let Some(&substs) = tcx.substs_interner.borrow().get(*self) {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> {
+ if let Some(&Interned(substs)) = tcx.interners.substs.borrow().get(*self) {
if *self as *const _ == substs as *const _ {
return Some(substs);
}
}
- None
+ // Also try in the global tcx if we're not that.
+ if !tcx.is_global() {
+ self.lift_to_tcx(tcx.global_tcx())
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for &'a Region {
+ type Lifted = &'tcx Region;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Region> {
+ if let Some(&Interned(region)) = tcx.interners.region.borrow().get(*self) {
+ if *self as *const _ == region as *const _ {
+ return Some(region);
+ }
+ }
+ // Also try in the global tcx if we're not that.
+ if !tcx.is_global() {
+ self.lift_to_tcx(tcx.global_tcx())
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for &'a [Ty<'a>] {
+ type Lifted = &'tcx [Ty<'tcx>];
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx [Ty<'tcx>]> {
+ if let Some(&Interned(list)) = tcx.interners.type_list.borrow().get(*self) {
+ if *self as *const _ == list as *const _ {
+ return Some(list);
+ }
+ }
+ // Also try in the global tcx if we're not that.
+ if !tcx.is_global() {
+ self.lift_to_tcx(tcx.global_tcx())
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for &'a BareFnTy<'a> {
+ type Lifted = &'tcx BareFnTy<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
+ -> Option<&'tcx BareFnTy<'tcx>> {
+ if let Some(&Interned(fty)) = tcx.interners.bare_fn.borrow().get(*self) {
+ if *self as *const _ == fty as *const _ {
+ return Some(fty);
+ }
+ }
+ // Also try in the global tcx if we're not that.
+ if !tcx.is_global() {
+ self.lift_to_tcx(tcx.global_tcx())
+ } else {
+ None
+ }
}
}
pub mod tls {
- use ty::TyCtxt;
+ use super::{CtxtInterners, GlobalCtxt, TyCtxt};
use std::cell::Cell;
use std::fmt;
use syntax::codemap;
- /// Marker type used for the scoped TLS slot.
+ /// Marker types used for the scoped TLS slot.
/// The type context cannot be used directly because the scoped TLS
/// in libstd doesn't allow types generic over lifetimes.
- struct ThreadLocalTyCx;
+ enum ThreadLocalGlobalCtxt {}
+ enum ThreadLocalInterners {}
thread_local! {
- static TLS_TCX: Cell<Option<*const ThreadLocalTyCx>> = Cell::new(None)
+ static TLS_TCX: Cell<Option<(*const ThreadLocalGlobalCtxt,
+ *const ThreadLocalInterners)>> = Cell::new(None)
}
fn span_debug(span: codemap::Span, f: &mut fmt::Formatter) -> fmt::Result {
})
}
- pub fn enter<'tcx, F: FnOnce(&TyCtxt<'tcx>) -> R, R>(tcx: TyCtxt<'tcx>, f: F) -> R {
+ pub fn enter_global<'gcx, F, R>(gcx: GlobalCtxt<'gcx>, f: F) -> R
+ where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R
+ {
codemap::SPAN_DEBUG.with(|span_dbg| {
let original_span_debug = span_dbg.get();
span_dbg.set(span_debug);
- let tls_ptr = &tcx as *const _ as *const ThreadLocalTyCx;
- let result = TLS_TCX.with(|tls| {
- let prev = tls.get();
- tls.set(Some(tls_ptr));
- let ret = f(&tcx);
- tls.set(prev);
- ret
- });
+ let result = enter(&gcx, &gcx.global_interners, f);
span_dbg.set(original_span_debug);
result
})
}
- pub fn with<F: FnOnce(&TyCtxt) -> R, R>(f: F) -> R {
+ pub fn enter<'a, 'gcx: 'tcx, 'tcx, F, R>(gcx: &'a GlobalCtxt<'gcx>,
+ interners: &'a CtxtInterners<'tcx>,
+ f: F) -> R
+ where F: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
+ {
+ let gcx_ptr = gcx as *const _ as *const ThreadLocalGlobalCtxt;
+ let interners_ptr = interners as *const _ as *const ThreadLocalInterners;
+ TLS_TCX.with(|tls| {
+ let prev = tls.get();
+ tls.set(Some((gcx_ptr, interners_ptr)));
+ let ret = f(TyCtxt {
+ gcx: gcx,
+ interners: interners
+ });
+ tls.set(prev);
+ ret
+ })
+ }
+
+ pub fn with<F, R>(f: F) -> R
+ where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R
+ {
TLS_TCX.with(|tcx| {
- let tcx = tcx.get().unwrap();
- f(unsafe { &*(tcx as *const TyCtxt) })
+ let (gcx, interners) = tcx.get().unwrap();
+ let gcx = unsafe { &*(gcx as *const GlobalCtxt) };
+ let interners = unsafe { &*(interners as *const CtxtInterners) };
+ f(TyCtxt {
+ gcx: gcx,
+ interners: interners
+ })
})
}
- pub fn with_opt<F: FnOnce(Option<&TyCtxt>) -> R, R>(f: F) -> R {
+ pub fn with_opt<F, R>(f: F) -> R
+ where F: for<'a, 'gcx, 'tcx> FnOnce(Option<TyCtxt<'a, 'gcx, 'tcx>>) -> R
+ {
if TLS_TCX.with(|tcx| tcx.get().is_some()) {
with(|v| f(Some(v)))
} else {
#[allow(non_snake_case)]
mod inner {
use ty::{self, TyCtxt};
+ use ty::context::Interned;
+
#[derive(Copy, Clone)]
struct DebugStat {
total: usize,
both_infer: usize,
}
- pub fn go(tcx: &TyCtxt) {
+ pub fn go(tcx: TyCtxt) {
let mut total = DebugStat {
total: 0,
region_infer: 0, ty_infer: 0, both_infer: 0,
$(let mut $variant = total;)*
- for (_, t) in tcx.interner.borrow().iter() {
+ for &Interned(t) in tcx.interners.type_.borrow().iter() {
let variant = match t.sty {
ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
ty::TyFloat(..) | ty::TyStr => continue,
}}
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn print_debug_stats(&self) {
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ pub fn print_debug_stats(self) {
sty_debug_print!(
self,
TyEnum, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr,
TyTrait, TyStruct, TyClosure, TyTuple, TyParam, TyInfer, TyProjection);
- println!("Substs interner: #{}", self.substs_interner.borrow().len());
- println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len());
- println!("Region interner: #{}", self.region_interner.borrow().len());
- println!("Stability interner: #{}", self.stability_interner.borrow().len());
- println!("Layout interner: #{}", self.layout_interner.borrow().len());
+ println!("Substs interner: #{}", self.interners.substs.borrow().len());
+ println!("BareFnTy interner: #{}", self.interners.bare_fn.borrow().len());
+ println!("Region interner: #{}", self.interners.region.borrow().len());
+ println!("Stability interner: #{}", self.interners.stability.borrow().len());
+ println!("Layout interner: #{}", self.interners.layout.borrow().len());
}
}
-/// An entry in the type interner.
-pub struct InternedTy<'tcx> {
- ty: Ty<'tcx>
-}
+/// An entry in an interner.
+struct Interned<'tcx, T: 'tcx+?Sized>(&'tcx T);
-// NB: An InternedTy compares and hashes as a sty.
-impl<'tcx> PartialEq for InternedTy<'tcx> {
- fn eq(&self, other: &InternedTy<'tcx>) -> bool {
- self.ty.sty == other.ty.sty
+// NB: An Interned<Ty> compares and hashes as a sty.
+impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> {
+ fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool {
+ self.0.sty == other.0.sty
}
}
-impl<'tcx> Eq for InternedTy<'tcx> {}
+impl<'tcx> Eq for Interned<'tcx, TyS<'tcx>> {}
-impl<'tcx> Hash for InternedTy<'tcx> {
+impl<'tcx> Hash for Interned<'tcx, TyS<'tcx>> {
fn hash<H: Hasher>(&self, s: &mut H) {
- self.ty.sty.hash(s)
+ self.0.sty.hash(s)
}
}
-impl<'tcx> Borrow<TypeVariants<'tcx>> for InternedTy<'tcx> {
- fn borrow<'a>(&'a self) -> &'a TypeVariants<'tcx> {
- &self.ty.sty
+impl<'tcx: 'lcx, 'lcx> Borrow<TypeVariants<'lcx>> for Interned<'tcx, TyS<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a TypeVariants<'lcx> {
+ &self.0.sty
}
}
-fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
- bounds.is_empty() ||
- bounds[1..].iter().enumerate().all(
- |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
+impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, [Ty<'tcx>]> {
+ fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] {
+ self.0
+ }
}
-impl<'tcx> TyCtxt<'tcx> {
- // Type constructors
- pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
- if let Some(substs) = self.substs_interner.borrow().get(&substs) {
- return *substs;
- }
+impl<'tcx: 'lcx, 'lcx> Borrow<Substs<'lcx>> for Interned<'tcx, Substs<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a Substs<'lcx> {
+ self.0
+ }
+}
- let substs = self.arenas.substs.alloc(substs);
- self.substs_interner.borrow_mut().insert(substs, substs);
- substs
+impl<'tcx: 'lcx, 'lcx> Borrow<BareFnTy<'lcx>> for Interned<'tcx, BareFnTy<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a BareFnTy<'lcx> {
+ self.0
}
+}
- /// Create an unsafe fn ty based on a safe fn ty.
- pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
- assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal);
- self.mk_fn_ptr(ty::BareFnTy {
- unsafety: hir::Unsafety::Unsafe,
- abi: bare_fn.abi,
- sig: bare_fn.sig.clone()
- })
+impl<'tcx> Borrow<Region> for Interned<'tcx, Region> {
+ fn borrow<'a>(&'a self) -> &'a Region {
+ self.0
}
+}
- pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
- if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
- return *bare_fn;
+macro_rules! items { ($($item:item)+) => ($($item)+) }
+macro_rules! impl_interners {
+ ($lt_tcx:tt, $($name:ident: $method:ident($alloc:ty, $needs_infer:expr)-> $ty:ty),+) => {
+ items!($(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
}
- let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
- self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
- bare_fn
- }
+ impl<$lt_tcx> Eq for Interned<$lt_tcx, $ty> {}
- pub fn mk_region(&self, region: Region) -> &'tcx Region {
- if let Some(region) = self.region_interner.borrow().get(&region) {
- return *region;
+ impl<$lt_tcx> Hash for Interned<$lt_tcx, $ty> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ self.0.hash(s)
+ }
}
- let region = self.arenas.region.alloc(region);
- self.region_interner.borrow_mut().insert(region, region);
- region
- }
+ impl<'a, 'gcx, $lt_tcx> TyCtxt<'a, 'gcx, $lt_tcx> {
+ pub fn $method(self, v: $alloc) -> &$lt_tcx $ty {
+ if let Some(i) = self.interners.$name.borrow().get::<$ty>(&v) {
+ return i.0;
+ }
+ if !self.is_global() {
+ if let Some(i) = self.global_interners.$name.borrow().get::<$ty>(&v) {
+ return i.0;
+ }
+ }
- fn intern_ty(type_arena: &'tcx TypedArena<TyS<'tcx>>,
- interner: &RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
- st: TypeVariants<'tcx>)
- -> Ty<'tcx> {
- let ty: Ty /* don't be &mut TyS */ = {
- let mut interner = interner.borrow_mut();
- match interner.get(&st) {
- Some(ty) => return *ty,
- _ => ()
+ // HACK(eddyb) Depend on flags being accurate to
+ // determine that all contents are in the global tcx.
+ // See comments on Lift for why we can't use that.
+ if !($needs_infer)(&v) {
+ if !self.is_global() {
+ let v = unsafe {
+ mem::transmute(v)
+ };
+ let i = self.global_interners.arenas.$name.alloc(v);
+ self.global_interners.$name.borrow_mut().insert(Interned(i));
+ return i;
+ }
+ } else {
+ // Make sure we don't end up with inference
+ // types/regions in the global tcx.
+ if self.is_global() {
+ bug!("Attempted to intern `{:?}` which contains \
+ inference types/regions in the global type context",
+ v);
+ }
+ }
+
+ let i = self.interners.arenas.$name.alloc(v);
+ self.interners.$name.borrow_mut().insert(Interned(i));
+ i
}
+ })+);
+ }
+}
- let flags = super::flags::FlagComputation::for_sty(&st);
+fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool {
+ x.has_type_flags(ty::TypeFlags::KEEP_IN_LOCAL_TCX)
+}
- let ty = match () {
- () => type_arena.alloc(TyS { sty: st,
- flags: Cell::new(flags.flags),
- region_depth: flags.depth, }),
- };
+impl_interners!('tcx,
+ type_list: mk_type_list(Vec<Ty<'tcx>>, keep_local) -> [Ty<'tcx>],
+ substs: mk_substs(Substs<'tcx>, |substs: &Substs| {
+ keep_local(&substs.types) || keep_local(&substs.regions)
+ }) -> Substs<'tcx>,
+ bare_fn: mk_bare_fn(BareFnTy<'tcx>, |fty: &BareFnTy| {
+ keep_local(&fty.sig)
+ }) -> BareFnTy<'tcx>,
+ region: mk_region(Region, keep_local) -> Region
+);
- interner.insert(InternedTy { ty: ty }, ty);
- ty
- };
+fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool {
+ bounds.is_empty() ||
+ bounds[1..].iter().enumerate().all(
+ |(index, bound)| bounds[index].sort_key() <= bound.sort_key())
+}
- debug!("Interned type: {:?} Pointer: {:?}",
- ty, ty as *const TyS);
- ty
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ /// Create an unsafe fn ty based on a safe fn ty.
+ pub fn safe_to_unsafe_fn_ty(self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> {
+ assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal);
+ self.mk_fn_ptr(self.mk_bare_fn(ty::BareFnTy {
+ unsafety: hir::Unsafety::Unsafe,
+ abi: bare_fn.abi,
+ sig: bare_fn.sig.clone()
+ }))
}
- // Interns a type/name combination, stores the resulting box in cx.interner,
+ // Interns a type/name combination, stores the resulting box in cx.interners,
// and returns the box as cast to an unsafe ptr (see comments for Ty above).
- pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
- TyCtxt::intern_ty(&self.arenas.type_, &self.interner, st)
+ pub fn mk_ty(self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
+ let global_interners = if !self.is_global() {
+ Some(&self.global_interners)
+ } else {
+ None
+ };
+ self.interners.intern_ty(st, global_interners)
}
- pub fn mk_mach_int(&self, tm: ast::IntTy) -> Ty<'tcx> {
+ pub fn mk_mach_int(self, tm: ast::IntTy) -> Ty<'tcx> {
match tm {
ast::IntTy::Is => self.types.isize,
ast::IntTy::I8 => self.types.i8,
}
}
- pub fn mk_mach_uint(&self, tm: ast::UintTy) -> Ty<'tcx> {
+ pub fn mk_mach_uint(self, tm: ast::UintTy) -> Ty<'tcx> {
match tm {
ast::UintTy::Us => self.types.usize,
ast::UintTy::U8 => self.types.u8,
}
}
- pub fn mk_mach_float(&self, tm: ast::FloatTy) -> Ty<'tcx> {
+ pub fn mk_mach_float(self, tm: ast::FloatTy) -> Ty<'tcx> {
match tm {
ast::FloatTy::F32 => self.types.f32,
ast::FloatTy::F64 => self.types.f64,
}
}
- pub fn mk_str(&self) -> Ty<'tcx> {
+ pub fn mk_str(self) -> Ty<'tcx> {
self.mk_ty(TyStr)
}
- pub fn mk_static_str(&self) -> Ty<'tcx> {
+ pub fn mk_static_str(self) -> Ty<'tcx> {
self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
}
- pub fn mk_enum(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ pub fn mk_enum(self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// take a copy of substs so that we own the vectors inside
self.mk_ty(TyEnum(def, substs))
}
- pub fn mk_box(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ty(TyBox(ty))
}
- pub fn mk_ptr(&self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
self.mk_ty(TyRawPtr(tm))
}
- pub fn mk_ref(&self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ pub fn mk_ref(self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
self.mk_ty(TyRef(r, tm))
}
- pub fn mk_mut_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_mut_ref(self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable})
}
- pub fn mk_imm_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_imm_ref(self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
}
- pub fn mk_mut_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable})
}
- pub fn mk_imm_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable})
}
- pub fn mk_nil_ptr(&self) -> Ty<'tcx> {
+ pub fn mk_nil_ptr(self) -> Ty<'tcx> {
self.mk_imm_ptr(self.mk_nil())
}
- pub fn mk_array(&self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> {
+ pub fn mk_array(self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> {
self.mk_ty(TyArray(ty, n))
}
- pub fn mk_slice(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
self.mk_ty(TySlice(ty))
}
- pub fn mk_tup(&self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
- self.mk_ty(TyTuple(ts))
+ pub fn mk_tup(self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
+ self.mk_ty(TyTuple(self.mk_type_list(ts)))
}
- pub fn mk_nil(&self) -> Ty<'tcx> {
+ pub fn mk_nil(self) -> Ty<'tcx> {
self.mk_tup(Vec::new())
}
- pub fn mk_bool(&self) -> Ty<'tcx> {
+ pub fn mk_bool(self) -> Ty<'tcx> {
self.mk_ty(TyBool)
}
- pub fn mk_fn_def(&self, def_id: DefId,
+ pub fn mk_fn_def(self, def_id: DefId,
substs: &'tcx Substs<'tcx>,
- fty: BareFnTy<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnDef(def_id, substs, self.mk_bare_fn(fty)))
+ fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyFnDef(def_id, substs, fty))
}
- pub fn mk_fn_ptr(&self, fty: BareFnTy<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnPtr(self.mk_bare_fn(fty)))
+ pub fn mk_fn_ptr(self, fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(TyFnPtr(fty))
}
- pub fn mk_trait(&self,
+ pub fn mk_trait(self,
principal: ty::PolyTraitRef<'tcx>,
bounds: ExistentialBounds<'tcx>)
-> Ty<'tcx>
self.mk_ty(TyTrait(inner))
}
- pub fn mk_projection(&self,
+ pub fn mk_projection(self,
trait_ref: TraitRef<'tcx>,
item_name: Name)
-> Ty<'tcx> {
self.mk_ty(TyProjection(inner))
}
- pub fn mk_struct(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ pub fn mk_struct(self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// take a copy of substs so that we own the vectors inside
self.mk_ty(TyStruct(def, substs))
}
- pub fn mk_closure(&self,
+ pub fn mk_closure(self,
closure_id: DefId,
substs: &'tcx Substs<'tcx>,
tys: Vec<Ty<'tcx>>)
-> Ty<'tcx> {
- self.mk_closure_from_closure_substs(closure_id, Box::new(ClosureSubsts {
+ self.mk_closure_from_closure_substs(closure_id, ClosureSubsts {
func_substs: substs,
- upvar_tys: tys
- }))
+ upvar_tys: self.mk_type_list(tys)
+ })
}
- pub fn mk_closure_from_closure_substs(&self,
+ pub fn mk_closure_from_closure_substs(self,
closure_id: DefId,
- closure_substs: Box<ClosureSubsts<'tcx>>)
+ closure_substs: ClosureSubsts<'tcx>)
-> Ty<'tcx> {
self.mk_ty(TyClosure(closure_id, closure_substs))
}
- pub fn mk_var(&self, v: TyVid) -> Ty<'tcx> {
+ pub fn mk_var(self, v: TyVid) -> Ty<'tcx> {
self.mk_infer(TyVar(v))
}
- pub fn mk_int_var(&self, v: IntVid) -> Ty<'tcx> {
+ pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
self.mk_infer(IntVar(v))
}
- pub fn mk_float_var(&self, v: FloatVid) -> Ty<'tcx> {
+ pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
self.mk_infer(FloatVar(v))
}
- pub fn mk_infer(&self, it: InferTy) -> Ty<'tcx> {
+ pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> {
self.mk_ty(TyInfer(it))
}
- pub fn mk_param(&self,
+ pub fn mk_param(self,
space: subst::ParamSpace,
index: u32,
name: Name) -> Ty<'tcx> {
self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name }))
}
- pub fn mk_self_type(&self) -> Ty<'tcx> {
+ pub fn mk_self_type(self) -> Ty<'tcx> {
self.mk_param(subst::SelfSpace, 0, keywords::SelfType.name())
}
- pub fn mk_param_from_def(&self, def: &ty::TypeParameterDef) -> Ty<'tcx> {
+ pub fn mk_param_from_def(self, def: &ty::TypeParameterDef) -> Ty<'tcx> {
self.mk_param(def.space, def.index, def.name)
}
- pub fn trait_items(&self, trait_did: DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> {
+ pub fn trait_items(self, trait_did: DefId) -> Rc<Vec<ty::ImplOrTraitItem<'gcx>>> {
self.trait_items_cache.memoize(trait_did, || {
let def_ids = self.trait_item_def_ids(trait_did);
Rc::new(def_ids.iter()
}
/// Obtain the representation annotation for a struct definition.
- pub fn lookup_repr_hints(&self, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
+ pub fn lookup_repr_hints(self, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
self.repr_hint_cache.memoize(did, || {
Rc::new(if did.is_local() {
self.get_attrs(did).iter().flat_map(|meta| {
}
}
-impl<'tcx> ty::TyS<'tcx> {
- fn sort_string(&self, cx: &TyCtxt) -> String {
+impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
+ fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String {
match self.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) |
ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr => self.to_string(),
ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
- ty::TyEnum(def, _) => format!("enum `{}`", cx.item_path_str(def.did)),
+ ty::TyEnum(def, _) => format!("enum `{}`", tcx.item_path_str(def.did)),
ty::TyBox(_) => "box".to_string(),
ty::TyArray(_, n) => format!("array of {} elements", n),
ty::TySlice(_) => "slice".to_string(),
ty::TyFnDef(..) => format!("fn item"),
ty::TyFnPtr(_) => "fn pointer".to_string(),
ty::TyTrait(ref inner) => {
- format!("trait {}", cx.item_path_str(inner.principal_def_id()))
+ format!("trait {}", tcx.item_path_str(inner.principal_def_id()))
}
ty::TyStruct(def, _) => {
- format!("struct `{}`", cx.item_path_str(def.did))
+ format!("struct `{}`", tcx.item_path_str(def.did))
}
ty::TyClosure(..) => "closure".to_string(),
ty::TyTuple(_) => "tuple".to_string(),
}
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn note_and_explain_type_err(&self,
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn note_and_explain_type_err(self,
db: &mut DiagnosticBuilder,
err: &TypeError<'tcx>,
sp: Span) {
/// then we can't say much about whether two types would unify. Put another way,
/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
/// are to be considered bound.
-pub fn simplify_type(tcx: &TyCtxt,
- ty: Ty,
- can_simplify_params: bool)
- -> Option<SimplifiedType>
+pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty,
+ can_simplify_params: bool)
+ -> Option<SimplifiedType>
{
match ty.sty {
ty::TyBool => Some(BoolSimplifiedType),
self.add_tys(&substs.upvar_tys);
}
- &ty::TyInfer(_) => {
+ &ty::TyInfer(infer) => {
self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right?
- self.add_flags(TypeFlags::HAS_TY_INFER)
+ self.add_flags(TypeFlags::HAS_TY_INFER);
+ match infer {
+ ty::FreshTy(_) |
+ ty::FreshIntTy(_) |
+ ty::FreshFloatTy(_) => {}
+ _ => self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX)
+ }
}
&ty::TyEnum(_, substs) | &ty::TyStruct(_, substs) => {
fn add_region(&mut self, r: ty::Region) {
match r {
ty::ReVar(..) |
- ty::ReSkolemized(..) => { self.add_flags(TypeFlags::HAS_RE_INFER); }
+ ty::ReSkolemized(..) => {
+ self.add_flags(TypeFlags::HAS_RE_INFER);
+ self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX);
+ }
ty::ReLateBound(debruijn, _) => { self.add_depth(debruijn.depth); }
ty::ReEarlyBound(..) => { self.add_flags(TypeFlags::HAS_RE_EARLY_BOUND); }
ty::ReStatic => {}
/// The TypeFoldable trait is implemented for every type that can be folded.
/// Basically, every type that has a corresponding method in TypeFolder.
pub trait TypeFoldable<'tcx>: fmt::Debug + Clone {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self;
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.super_fold_with(folder)
}
/// default implementation that does an "identity" fold. Within each
/// identity fold, it should invoke `foo.fold_with(self)` to fold each
/// sub-item.
-pub trait TypeFolder<'tcx> : Sized {
- fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
+pub trait TypeFolder<'gcx: 'tcx, 'tcx> : Sized {
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>;
fn fold_binder<T>(&mut self, t: &Binder<T>) -> Binder<T>
where T : TypeFoldable<'tcx>
}
fn fold_substs(&mut self,
- substs: &subst::Substs<'tcx>)
- -> subst::Substs<'tcx> {
+ substs: &'tcx subst::Substs<'tcx>)
+ -> &'tcx subst::Substs<'tcx> {
substs.super_fold_with(self)
}
}
fn fold_bare_fn_ty(&mut self,
- fty: &ty::BareFnTy<'tcx>)
- -> ty::BareFnTy<'tcx>
+ fty: &'tcx ty::BareFnTy<'tcx>)
+ -> &'tcx ty::BareFnTy<'tcx>
{
fty.super_fold_with(self)
}
///////////////////////////////////////////////////////////////////////////
// Some sample folders
-pub struct BottomUpFolder<'a, 'tcx: 'a, F> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> {
- pub tcx: &'a TyCtxt<'tcx>,
+pub struct BottomUpFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a, F>
+ where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
+{
+ pub tcx: TyCtxt<'a, 'gcx, 'tcx>,
pub fldop: F,
}
-impl<'a, 'tcx, F> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx, F> where
- F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+impl<'a, 'gcx, 'tcx, F> TypeFolder<'gcx, 'tcx> for BottomUpFolder<'a, 'gcx, 'tcx, F>
+ where F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
{
- fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
let t1 = ty.super_fold_with(self);
///////////////////////////////////////////////////////////////////////////
// Region folder
-impl<'tcx> TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// Collects the free and escaping regions in `value` into `region_set`. Returns
/// whether any late-bound regions were skipped
- pub fn collect_regions<T>(&self,
+ pub fn collect_regions<T>(self,
value: &T,
region_set: &mut FnvHashSet<ty::Region>)
-> bool
/// Folds the escaping and free regions in `value` using `f`, and
/// sets `skipped_regions` to true if any late-bound region was found
/// and skipped.
- pub fn fold_regions<T,F>(&self,
+ pub fn fold_regions<T,F>(self,
value: &T,
skipped_regions: &mut bool,
mut f: F)
/// visited by this folder; only regions that occur free will be
/// visited by `fld_r`.
-pub struct RegionFolder<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+pub struct RegionFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
skipped_regions: &'a mut bool,
current_depth: u32,
fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
}
-impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
- pub fn new<F>(tcx: &'a TyCtxt<'tcx>,
+impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> {
+ pub fn new<F>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
skipped_regions: &'a mut bool,
- fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
+ fld_r: &'a mut F) -> RegionFolder<'a, 'gcx, 'tcx>
where F : FnMut(ty::Region, u32) -> ty::Region
{
RegionFolder {
}
}
-impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx>
-{
- fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
self.current_depth += 1;
// Replaces the escaping regions in a type.
-struct RegionReplacer<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
current_depth: u32,
fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a),
map: FnvHashMap<ty::BoundRegion, ty::Region>
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn replace_late_bound_regions<T,F>(&self,
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn replace_late_bound_regions<T,F>(self,
value: &Binder<T>,
mut f: F)
-> (T, FnvHashMap<ty::BoundRegion, ty::Region>)
/// Replace any late-bound regions bound in `value` with free variants attached to scope-id
/// `scope_id`.
- pub fn liberate_late_bound_regions<T>(&self,
+ pub fn liberate_late_bound_regions<T>(self,
all_outlive_scope: region::CodeExtent,
value: &Binder<T>)
-> T
/// Flattens two binding levels into one. So `for<'a> for<'b> Foo`
/// becomes `for<'a,'b> Foo`.
- pub fn flatten_late_bound_regions<T>(&self, bound2_value: &Binder<Binder<T>>)
+ pub fn flatten_late_bound_regions<T>(self, bound2_value: &Binder<Binder<T>>)
-> Binder<T>
where T: TypeFoldable<'tcx>
{
Binder(value)
}
- pub fn no_late_bound_regions<T>(&self, value: &Binder<T>) -> Option<T>
+ pub fn no_late_bound_regions<T>(self, value: &Binder<T>) -> Option<T>
where T : TypeFoldable<'tcx>
{
if value.0.has_escaping_regions() {
/// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
/// method lookup and a few other places where precise region relationships are not required.
- pub fn erase_late_bound_regions<T>(&self, value: &Binder<T>) -> T
+ pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
self.replace_late_bound_regions(value, |_| ty::ReStatic).0
/// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
/// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
/// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
- pub fn anonymize_late_bound_regions<T>(&self, sig: &Binder<T>) -> Binder<T>
+ pub fn anonymize_late_bound_regions<T>(self, sig: &Binder<T>) -> Binder<T>
where T : TypeFoldable<'tcx>,
{
let mut counter = 0;
}
}
-impl<'a, 'tcx> RegionReplacer<'a, 'tcx> {
- fn new<F>(tcx: &'a TyCtxt<'tcx>, fld_r: &'a mut F) -> RegionReplacer<'a, 'tcx>
+impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> {
+ fn new<F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F)
+ -> RegionReplacer<'a, 'gcx, 'tcx>
where F : FnMut(ty::BoundRegion) -> ty::Region
{
RegionReplacer {
}
}
-impl<'a, 'tcx> TypeFolder<'tcx> for RegionReplacer<'a, 'tcx>
-{
- fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
self.current_depth += 1;
///////////////////////////////////////////////////////////////////////////
// Region eraser
-impl<'tcx> TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// Returns an equivalent value with all free regions removed (note
/// that late-bound regions remain, because they are important for
/// subtyping, but they are anonymized and normalized as well)..
- pub fn erase_regions<T>(&self, value: &T) -> T
+ pub fn erase_regions<T>(self, value: &T) -> T
where T : TypeFoldable<'tcx>
{
let value1 = value.fold_with(&mut RegionEraser(self));
value, value1);
return value1;
- struct RegionEraser<'a, 'tcx: 'a>(&'a TyCtxt<'tcx>);
+ struct RegionEraser<'a, 'gcx: 'a+'tcx, 'tcx: 'a>(TyCtxt<'a, 'gcx, 'tcx>);
- impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.0 }
+ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraser<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.0 }
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match self.tcx().normalized_cache.borrow().get(&ty).cloned() {
Some(u) => return u
}
- let t_norm = ty.super_fold_with(self);
- self.tcx().normalized_cache.borrow_mut().insert(ty, t_norm);
- return t_norm;
+ // FIXME(eddyb) should local contexts have a cache too?
+ if let Some(ty_lifted) = self.tcx().lift_to_global(&ty) {
+ let tcx = self.tcx().global_tcx();
+ let t_norm = ty_lifted.super_fold_with(&mut RegionEraser(tcx));
+ tcx.normalized_cache.borrow_mut().insert(ty_lifted, t_norm);
+ t_norm
+ } else {
+ ty.super_fold_with(self)
+ }
}
fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
_ => ty::ReStatic
}
}
-
- fn fold_substs(&mut self,
- substs: &subst::Substs<'tcx>)
- -> subst::Substs<'tcx> {
- subst::Substs { regions: substs.regions.fold_with(self),
- types: substs.types.fold_with(self) }
- }
}
}
}
}
}
-pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>>(tcx: &TyCtxt<'tcx>,
- amount: u32, value: &T) -> T {
+pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ amount: u32, value: &T) -> T
+ where T: TypeFoldable<'tcx>
+{
debug!("shift_regions(value={:?}, amount={})",
value, amount);
use ty::{self, Ty, TyCtxt};
use syntax::ast;
-impl<'tcx> TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// Returns a string identifying this def-id. This string is
/// suitable for user output. It is relative to the current crate
/// root.
- pub fn item_path_str(&self, def_id: DefId) -> String {
+ pub fn item_path_str(self, def_id: DefId) -> String {
let mut buffer = LocalPathBuffer::new(RootMode::Local);
self.push_item_path(&mut buffer, def_id);
buffer.into_string()
}
/// Returns a string identifying this local node-id.
- pub fn node_path_str(&self, id: ast::NodeId) -> String {
+ pub fn node_path_str(self, id: ast::NodeId) -> String {
self.item_path_str(self.map.local_def_id(id))
}
/// Returns a string identifying this def-id. This string is
/// suitable for user output. It always begins with a crate identifier.
- pub fn absolute_item_path_str(&self, def_id: DefId) -> String {
+ pub fn absolute_item_path_str(self, def_id: DefId) -> String {
let mut buffer = LocalPathBuffer::new(RootMode::Absolute);
self.push_item_path(&mut buffer, def_id);
buffer.into_string()
/// Returns the "path" to a particular crate. This can proceed in
/// various ways, depending on the `root_mode` of the `buffer`.
/// (See `RootMode` enum for more details.)
- pub fn push_krate_path<T>(&self, buffer: &mut T, cnum: ast::CrateNum)
+ pub fn push_krate_path<T>(self, buffer: &mut T, cnum: ast::CrateNum)
where T: ItemPathBuffer
{
match *buffer.root_mode() {
/// If possible, this pushes a global path resolving to `external_def_id` that is visible
/// from at least one local module and returns true. If the crate defining `external_def_id` is
/// declared with an `extern crate`, the path is guarenteed to use the `extern crate`.
- pub fn try_push_visible_item_path<T>(&self, buffer: &mut T, external_def_id: DefId) -> bool
+ pub fn try_push_visible_item_path<T>(self, buffer: &mut T, external_def_id: DefId) -> bool
where T: ItemPathBuffer
{
let visible_parent_map = self.sess.cstore.visible_parent_map();
}
}
- pub fn push_item_path<T>(&self, buffer: &mut T, def_id: DefId)
+ pub fn push_item_path<T>(self, buffer: &mut T, def_id: DefId)
where T: ItemPathBuffer
{
match *buffer.root_mode() {
}
}
- fn push_impl_path<T>(&self,
+ fn push_impl_path<T>(self,
buffer: &mut T,
impl_def_id: DefId)
where T: ItemPathBuffer
}
}
- fn push_impl_path_fallback<T>(&self,
+ fn push_impl_path_fallback<T>(self,
buffer: &mut T,
impl_def_id: DefId)
where T: ItemPathBuffer
/// function tries to find a "characteristic def-id" for a
/// type. It's just a heuristic so it makes some questionable
/// decisions and we may want to adjust it later.
-pub fn characteristic_def_id_of_type<'tcx>(ty: Ty<'tcx>) -> Option<DefId> {
+pub fn characteristic_def_id_of_type(ty: Ty) -> Option<DefId> {
match ty.sty {
ty::TyStruct(adt_def, _) |
ty::TyEnum(adt_def, _) => Some(adt_def.did),
pub use self::Layout::*;
pub use self::Primitive::*;
-use infer::{InferCtxt, drain_fulfillment_cx_or_panic};
+use infer::InferCtxt;
use session::Session;
use traits;
use ty::{self, Ty, TyCtxt, TypeFoldable};
/// signed discriminant range and #[repr] attribute.
/// N.B.: u64 values above i64::MAX will be treated as signed, but
/// that shouldn't affect anything, other than maybe debuginfo.
- pub fn repr_discr(tcx: &TyCtxt, hint: attr::ReprAttr, min: i64, max: i64)
+ pub fn repr_discr(tcx: TyCtxt, hint: attr::ReprAttr, min: i64, max: i64)
-> (Integer, bool) {
// Theoretically, negative values could be larger in unsigned representation
// than the unsigned representation of the signed minimum. However, if there
pub offset_after_field: Vec<Size>
}
-impl Struct {
+impl<'a, 'gcx, 'tcx> Struct {
pub fn new(dl: &TargetDataLayout, packed: bool) -> Struct {
Struct {
align: if packed { dl.i8_align } else { dl.aggregate_align },
}
/// Extend the Struct with more fields.
- pub fn extend<'a, 'tcx, I>(&mut self, dl: &TargetDataLayout,
- fields: I,
- scapegoat: Ty<'tcx>)
- -> Result<(), LayoutError<'tcx>>
- where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+ pub fn extend<I>(&mut self, dl: &TargetDataLayout,
+ fields: I,
+ scapegoat: Ty<'gcx>)
+ -> Result<(), LayoutError<'gcx>>
+ where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
self.offset_after_field.reserve(fields.size_hint().0);
for field in fields {
}
/// Determine whether a structure would be zero-sized, given its fields.
- pub fn would_be_zero_sized<'a, 'tcx, I>(dl: &TargetDataLayout, fields: I)
- -> Result<bool, LayoutError<'tcx>>
- where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+ pub fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
+ -> Result<bool, LayoutError<'gcx>>
+ where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
for field in fields {
let field = field?;
if field.is_unsized() || field.size(dl).bytes() > 0 {
/// Find the path leading to a non-zero leaf field, starting from
/// the given type and recursing through aggregates.
// FIXME(eddyb) track value ranges and traverse already optimized enums.
- pub fn non_zero_field_in_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- ty: Ty<'tcx>)
- -> Result<Option<FieldPath>, LayoutError<'tcx>> {
- let tcx = infcx.tcx;
+ pub fn non_zero_field_in_type(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'gcx>)
+ -> Result<Option<FieldPath>, LayoutError<'gcx>> {
+ let tcx = infcx.tcx.global_tcx();
match (ty.layout(infcx)?, &ty.sty) {
(&Scalar { non_zero: true, .. }, _) => Ok(Some(vec![])),
(&FatPointer { non_zero: true, .. }, _) => {
// Perhaps one of the upvars of this closure is non-zero
// Let's recurse and find out!
- (_, &ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. })) |
+ (_, &ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. })) |
// Can we use one of the fields in this tuple?
- (_, &ty::TyTuple(ref tys)) => {
+ (_, &ty::TyTuple(tys)) => {
Struct::non_zero_field_path(infcx, tys.iter().cloned())
}
/// Find the path leading to a non-zero leaf field, starting from
/// the given set of fields and recursing through aggregates.
- pub fn non_zero_field_path<'a, 'tcx, I>(infcx: &InferCtxt<'a, 'tcx>,
- fields: I)
- -> Result<Option<FieldPath>, LayoutError<'tcx>>
- where I: Iterator<Item=Ty<'tcx>> {
+ pub fn non_zero_field_path<I>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ fields: I)
+ -> Result<Option<FieldPath>, LayoutError<'gcx>>
+ where I: Iterator<Item=Ty<'gcx>> {
for (i, ty) in fields.enumerate() {
if let Some(mut path) = Struct::non_zero_field_in_type(infcx, ty)? {
path.push(i as u32);
}
/// Helper function for normalizing associated types in an inference context.
-fn normalize_associated_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- ty: Ty<'tcx>)
- -> Ty<'tcx> {
+fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'gcx>)
+ -> Ty<'gcx> {
if !ty.has_projection_types() {
return ty;
}
fulfill_cx.register_predicate_obligation(infcx, obligation);
}
- drain_fulfillment_cx_or_panic(DUMMY_SP, infcx, &mut fulfill_cx, &result)
+ infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
}
-impl Layout {
- pub fn compute_uncached<'a, 'tcx>(ty: Ty<'tcx>,
- infcx: &InferCtxt<'a, 'tcx>)
- -> Result<Layout, LayoutError<'tcx>> {
- let tcx = infcx.tcx;
+impl<'a, 'gcx, 'tcx> Layout {
+ pub fn compute_uncached(ty: Ty<'gcx>,
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> Result<Layout, LayoutError<'gcx>> {
+ let tcx = infcx.tcx.global_tcx();
let dl = &tcx.data_layout;
assert!(!ty.has_infer_types());
ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let non_zero = !ty.is_unsafe_ptr();
- if pointee.is_sized(&infcx.parameter_environment, DUMMY_SP) {
+ if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
Scalar { value: Pointer, non_zero: non_zero }
} else {
let unsized_part = tcx.struct_tail(pointee);
}
// Tuples.
- ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) |
- ty::TyTuple(ref tys) => {
+ ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) |
+ ty::TyTuple(tys) => {
let mut st = Struct::new(dl, false);
st.extend(dl, tys.iter().map(|ty| ty.layout(infcx)), ty)?;
Univariant { variant: st, non_zero: false }
// the unsized field. Several other pieces of code assume that the unsized
// field is definitely the last one.
if def.dtor_kind().has_drop_flag() &&
- ty.is_sized(&infcx.parameter_environment, DUMMY_SP) {
+ ty.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
st.extend(dl, Some(Ok(&Scalar {
value: Int(I8),
non_zero: false
}
}
-impl<'tcx> SizeSkeleton<'tcx> {
- pub fn compute<'a>(ty: Ty<'tcx>, infcx: &InferCtxt<'a, 'tcx>)
- -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
- let tcx = infcx.tcx;
+impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
+ pub fn compute(ty: Ty<'gcx>, infcx: &InferCtxt<'a, 'gcx, 'tcx>)
+ -> Result<SizeSkeleton<'gcx>, LayoutError<'gcx>> {
+ let tcx = infcx.tcx.global_tcx();
assert!(!ty.has_infer_types());
// First try computing a static layout.
/// The complete set of all analyses described in this module. This is
/// produced by the driver and fed to trans and later passes.
+#[derive(Clone)]
pub struct CrateAnalysis<'a> {
pub export_map: ExportMap,
pub access_levels: middle::privacy::AccessLevels,
pub predicates: Vec<Predicate<'tcx>>,
}
-impl<'tcx> ImplHeader<'tcx> {
- pub fn with_fresh_ty_vars<'a>(selcx: &mut traits::SelectionContext<'a, 'tcx>,
- impl_def_id: DefId)
- -> ImplHeader<'tcx>
+impl<'a, 'gcx, 'tcx> ImplHeader<'tcx> {
+ pub fn with_fresh_ty_vars(selcx: &mut traits::SelectionContext<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId)
+ -> ImplHeader<'tcx>
{
let tcx = selcx.tcx();
let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
}
impl Visibility {
- pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: &TyCtxt) -> Self {
+ pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: TyCtxt) -> Self {
match *visibility {
hir::Public => Visibility::Public,
hir::Visibility::Crate => Visibility::Restricted(ast::CRATE_NODE_ID),
pub name: Name,
pub generics: Generics<'tcx>,
pub predicates: GenericPredicates<'tcx>,
- pub fty: BareFnTy<'tcx>,
+ pub fty: &'tcx BareFnTy<'tcx>,
pub explicit_self: ExplicitSelfCategory,
pub vis: Visibility,
pub defaultness: hir::Defaultness,
pub fn new(name: Name,
generics: ty::Generics<'tcx>,
predicates: GenericPredicates<'tcx>,
- fty: BareFnTy<'tcx>,
+ fty: &'tcx BareFnTy<'tcx>,
explicit_self: ExplicitSelfCategory,
vis: Visibility,
defaultness: hir::Defaultness,
// that are local to a particular fn
const HAS_LOCAL_NAMES = 1 << 9,
+ // Present if the type belongs in a local type context.
+ // Only set for TyInfer other than Fresh.
+ const KEEP_IN_LOCAL_TCX = 1 << 10,
+
const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits |
TypeFlags::HAS_SELF.bits |
TypeFlags::HAS_RE_EARLY_BOUND.bits,
TypeFlags::HAS_TY_ERR.bits |
TypeFlags::HAS_PROJECTION.bits |
TypeFlags::HAS_TY_CLOSURE.bits |
- TypeFlags::HAS_LOCAL_NAMES.bits,
+ TypeFlags::HAS_LOCAL_NAMES.bits |
+ TypeFlags::KEEP_IN_LOCAL_TCX.bits,
// Caches for type_is_sized, type_moves_by_default
const SIZEDNESS_CACHED = 1 << 16,
pub predicates: VecPerParamSpace<Predicate<'tcx>>,
}
-impl<'tcx> GenericPredicates<'tcx> {
+impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> {
pub fn empty() -> GenericPredicates<'tcx> {
GenericPredicates {
predicates: VecPerParamSpace::empty(),
}
}
- pub fn instantiate(&self, tcx: &TyCtxt<'tcx>, substs: &Substs<'tcx>)
+ pub fn instantiate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>)
-> InstantiatedPredicates<'tcx> {
InstantiatedPredicates {
predicates: self.predicates.subst(tcx, substs),
}
}
- pub fn instantiate_supertrait(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn instantiate_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
poly_trait_ref: &ty::PolyTraitRef<'tcx>)
-> InstantiatedPredicates<'tcx>
{
InstantiatedPredicates {
- predicates: self.predicates.map(|pred| pred.subst_supertrait(tcx, poly_trait_ref))
+ predicates: self.predicates.map(|pred| {
+ pred.subst_supertrait(tcx, poly_trait_ref)
+ })
}
}
}
ClosureKind(DefId, ClosureKind),
}
-impl<'tcx> Predicate<'tcx> {
+impl<'a, 'gcx, 'tcx> Predicate<'tcx> {
/// Performs a substitution suitable for going from a
/// poly-trait-ref to supertraits that must hold if that
/// poly-trait-ref holds. This is slightly different from a normal
/// substitution in terms of what happens with bound regions. See
/// lengthy comment below for details.
- pub fn subst_supertrait(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn subst_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: &ty::PolyTraitRef<'tcx>)
-> ty::Predicate<'tcx>
{
/// future I hope to refine the representation of types so as to make
/// more distinctions clearer.
#[derive(Clone)]
-pub struct ParameterEnvironment<'a, 'tcx:'a> {
- pub tcx: &'a TyCtxt<'tcx>,
-
+pub struct ParameterEnvironment<'tcx> {
/// See `construct_free_substs` for details.
- pub free_substs: Substs<'tcx>,
+ pub free_substs: &'tcx Substs<'tcx>,
/// Each type parameter has an implicit region bound that
/// indicates it must outlive at least the function body (the user
/// into Obligations, and elaborated and normalized.
pub caller_bounds: Vec<ty::Predicate<'tcx>>,
- /// Caches the results of trait selection. This cache is used
- /// for things that have to do with the parameters in scope.
- pub selection_cache: traits::SelectionCache<'tcx>,
-
- /// Caches the results of trait evaluation.
- pub evaluation_cache: traits::EvaluationCache<'tcx>,
-
/// Scope that is attached to free regions for this scope. This
/// is usually the id of the fn body, but for more abstract scopes
/// like structs we often use the node-id of the struct.
pub free_id_outlive: CodeExtent,
}
-impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
+impl<'a, 'tcx> ParameterEnvironment<'tcx> {
pub fn with_caller_bounds(&self,
caller_bounds: Vec<ty::Predicate<'tcx>>)
- -> ParameterEnvironment<'a,'tcx>
+ -> ParameterEnvironment<'tcx>
{
ParameterEnvironment {
- tcx: self.tcx,
- free_substs: self.free_substs.clone(),
+ free_substs: self.free_substs,
implicit_region_bound: self.implicit_region_bound,
caller_bounds: caller_bounds,
- selection_cache: traits::SelectionCache::new(),
- evaluation_cache: traits::EvaluationCache::new(),
free_id_outlive: self.free_id_outlive,
}
}
/// Construct a parameter environment given an item, impl item, or trait item
- pub fn for_item(cx: &'a TyCtxt<'tcx>, id: NodeId) -> ParameterEnvironment<'a, 'tcx> {
- match cx.map.find(id) {
+ pub fn for_item(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: NodeId)
+ -> ParameterEnvironment<'tcx> {
+ match tcx.map.find(id) {
Some(ast_map::NodeImplItem(ref impl_item)) => {
match impl_item.node {
hir::ImplItemKind::Type(_) => {
// associated types don't have their own entry (for some reason),
// so for now just grab environment for the impl
- let impl_id = cx.map.get_parent(id);
- let impl_def_id = cx.map.local_def_id(impl_id);
- let scheme = cx.lookup_item_type(impl_def_id);
- let predicates = cx.lookup_predicates(impl_def_id);
- cx.construct_parameter_environment(impl_item.span,
- &scheme.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let impl_id = tcx.map.get_parent(id);
+ let impl_def_id = tcx.map.local_def_id(impl_id);
+ let scheme = tcx.lookup_item_type(impl_def_id);
+ let predicates = tcx.lookup_predicates(impl_def_id);
+ tcx.construct_parameter_environment(impl_item.span,
+ &scheme.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
hir::ImplItemKind::Const(_, _) => {
- let def_id = cx.map.local_def_id(id);
- let scheme = cx.lookup_item_type(def_id);
- let predicates = cx.lookup_predicates(def_id);
- cx.construct_parameter_environment(impl_item.span,
- &scheme.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let def_id = tcx.map.local_def_id(id);
+ let scheme = tcx.lookup_item_type(def_id);
+ let predicates = tcx.lookup_predicates(def_id);
+ tcx.construct_parameter_environment(impl_item.span,
+ &scheme.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
hir::ImplItemKind::Method(_, ref body) => {
- let method_def_id = cx.map.local_def_id(id);
- match cx.impl_or_trait_item(method_def_id) {
+ let method_def_id = tcx.map.local_def_id(id);
+ match tcx.impl_or_trait_item(method_def_id) {
MethodTraitItem(ref method_ty) => {
let method_generics = &method_ty.generics;
let method_bounds = &method_ty.predicates;
- cx.construct_parameter_environment(
+ tcx.construct_parameter_environment(
impl_item.span,
method_generics,
method_bounds,
- cx.region_maps.call_site_extent(id, body.id))
+ tcx.region_maps.call_site_extent(id, body.id))
}
_ => {
bug!("ParameterEnvironment::for_item(): \
hir::TypeTraitItem(..) => {
// associated types don't have their own entry (for some reason),
// so for now just grab environment for the trait
- let trait_id = cx.map.get_parent(id);
- let trait_def_id = cx.map.local_def_id(trait_id);
- let trait_def = cx.lookup_trait_def(trait_def_id);
- let predicates = cx.lookup_predicates(trait_def_id);
- cx.construct_parameter_environment(trait_item.span,
- &trait_def.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let trait_id = tcx.map.get_parent(id);
+ let trait_def_id = tcx.map.local_def_id(trait_id);
+ let trait_def = tcx.lookup_trait_def(trait_def_id);
+ let predicates = tcx.lookup_predicates(trait_def_id);
+ tcx.construct_parameter_environment(trait_item.span,
+ &trait_def.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
hir::ConstTraitItem(..) => {
- let def_id = cx.map.local_def_id(id);
- let scheme = cx.lookup_item_type(def_id);
- let predicates = cx.lookup_predicates(def_id);
- cx.construct_parameter_environment(trait_item.span,
- &scheme.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let def_id = tcx.map.local_def_id(id);
+ let scheme = tcx.lookup_item_type(def_id);
+ let predicates = tcx.lookup_predicates(def_id);
+ tcx.construct_parameter_environment(trait_item.span,
+ &scheme.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
hir::MethodTraitItem(_, ref body) => {
// Use call-site for extent (unless this is a
// trait method with no default; then fallback
// to the method id).
- let method_def_id = cx.map.local_def_id(id);
- match cx.impl_or_trait_item(method_def_id) {
+ let method_def_id = tcx.map.local_def_id(id);
+ match tcx.impl_or_trait_item(method_def_id) {
MethodTraitItem(ref method_ty) => {
let method_generics = &method_ty.generics;
let method_bounds = &method_ty.predicates;
let extent = if let Some(ref body) = *body {
// default impl: use call_site extent as free_id_outlive bound.
- cx.region_maps.call_site_extent(id, body.id)
+ tcx.region_maps.call_site_extent(id, body.id)
} else {
// no default impl: use item extent as free_id_outlive bound.
- cx.region_maps.item_extent(id)
+ tcx.region_maps.item_extent(id)
};
- cx.construct_parameter_environment(
+ tcx.construct_parameter_environment(
trait_item.span,
method_generics,
method_bounds,
match item.node {
hir::ItemFn(_, _, _, _, _, ref body) => {
// We assume this is a function.
- let fn_def_id = cx.map.local_def_id(id);
- let fn_scheme = cx.lookup_item_type(fn_def_id);
- let fn_predicates = cx.lookup_predicates(fn_def_id);
-
- cx.construct_parameter_environment(item.span,
- &fn_scheme.generics,
- &fn_predicates,
- cx.region_maps.call_site_extent(id,
- body.id))
+ let fn_def_id = tcx.map.local_def_id(id);
+ let fn_scheme = tcx.lookup_item_type(fn_def_id);
+ let fn_predicates = tcx.lookup_predicates(fn_def_id);
+
+ tcx.construct_parameter_environment(
+ item.span,
+ &fn_scheme.generics,
+ &fn_predicates,
+ tcx.region_maps.call_site_extent(id, body.id))
}
hir::ItemEnum(..) |
hir::ItemStruct(..) |
hir::ItemImpl(..) |
hir::ItemConst(..) |
hir::ItemStatic(..) => {
- let def_id = cx.map.local_def_id(id);
- let scheme = cx.lookup_item_type(def_id);
- let predicates = cx.lookup_predicates(def_id);
- cx.construct_parameter_environment(item.span,
- &scheme.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let def_id = tcx.map.local_def_id(id);
+ let scheme = tcx.lookup_item_type(def_id);
+ let predicates = tcx.lookup_predicates(def_id);
+ tcx.construct_parameter_environment(item.span,
+ &scheme.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
hir::ItemTrait(..) => {
- let def_id = cx.map.local_def_id(id);
- let trait_def = cx.lookup_trait_def(def_id);
- let predicates = cx.lookup_predicates(def_id);
- cx.construct_parameter_environment(item.span,
- &trait_def.generics,
- &predicates,
- cx.region_maps.item_extent(id))
+ let def_id = tcx.map.local_def_id(id);
+ let trait_def = tcx.lookup_trait_def(def_id);
+ let predicates = tcx.lookup_predicates(def_id);
+ tcx.construct_parameter_environment(item.span,
+ &trait_def.generics,
+ &predicates,
+ tcx.region_maps.item_extent(id))
}
_ => {
span_bug!(item.span,
}
Some(ast_map::NodeExpr(..)) => {
// This is a convenience to allow closures to work.
- ParameterEnvironment::for_item(cx, cx.map.get_parent(id))
+ ParameterEnvironment::for_item(tcx, tcx.map.get_parent(id))
}
Some(ast_map::NodeForeignItem(item)) => {
- let def_id = cx.map.local_def_id(id);
- let scheme = cx.lookup_item_type(def_id);
- let predicates = cx.lookup_predicates(def_id);
- cx.construct_parameter_environment(item.span,
- &scheme.generics,
- &predicates,
- ROOT_CODE_EXTENT)
+ let def_id = tcx.map.local_def_id(id);
+ let scheme = tcx.lookup_item_type(def_id);
+ let predicates = tcx.lookup_predicates(def_id);
+ tcx.construct_parameter_environment(item.span,
+ &scheme.generics,
+ &predicates,
+ ROOT_CODE_EXTENT)
}
_ => {
bug!("ParameterEnvironment::from_item(): \
`{}` is not an item",
- cx.map.node_to_string(id))
+ tcx.map.node_to_string(id))
}
}
}
}
}
-impl<'tcx, 'container> AdtDefData<'tcx, 'container> {
- fn new(tcx: &TyCtxt<'tcx>,
+impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
did: DefId,
kind: AdtKind,
- variants: Vec<VariantDefData<'tcx, 'container>>) -> Self {
+ variants: Vec<VariantDefData<'gcx, 'container>>) -> Self {
let mut flags = AdtFlags::NO_ADT_FLAGS;
let attrs = tcx.get_attrs(did);
if attr::contains_name(&attrs, "fundamental") {
}
}
- fn calculate_dtorck(&'tcx self, tcx: &TyCtxt<'tcx>) {
+ fn calculate_dtorck(&'gcx self, tcx: TyCtxt) {
if tcx.is_adt_dtorck(self) {
self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK);
}
/// true, this type being safe for destruction requires it to be
/// alive; Otherwise, only the contents are required to be.
#[inline]
- pub fn is_dtorck(&'tcx self, tcx: &TyCtxt<'tcx>) -> bool {
+ pub fn is_dtorck(&'gcx self, tcx: TyCtxt) -> bool {
if !self.flags.get().intersects(AdtFlags::IS_DTORCK_VALID) {
self.calculate_dtorck(tcx)
}
/// Asserts this is a struct and returns the struct's unique
/// variant.
- pub fn struct_variant(&self) -> &VariantDefData<'tcx, 'container> {
+ pub fn struct_variant(&self) -> &VariantDefData<'gcx, 'container> {
assert_eq!(self.adt_kind(), AdtKind::Struct);
&self.variants[0]
}
#[inline]
- pub fn type_scheme(&self, tcx: &TyCtxt<'tcx>) -> TypeScheme<'tcx> {
+ pub fn type_scheme(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> TypeScheme<'gcx> {
tcx.lookup_item_type(self.did)
}
#[inline]
- pub fn predicates(&self, tcx: &TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+ pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> GenericPredicates<'gcx> {
tcx.lookup_predicates(self.did)
}
#[inline]
pub fn all_fields(&self) ->
iter::FlatMap<
- slice::Iter<VariantDefData<'tcx, 'container>>,
- slice::Iter<FieldDefData<'tcx, 'container>>,
- for<'s> fn(&'s VariantDefData<'tcx, 'container>)
- -> slice::Iter<'s, FieldDefData<'tcx, 'container>>
+ slice::Iter<VariantDefData<'gcx, 'container>>,
+ slice::Iter<FieldDefData<'gcx, 'container>>,
+ for<'s> fn(&'s VariantDefData<'gcx, 'container>)
+ -> slice::Iter<'s, FieldDefData<'gcx, 'container>>
> {
self.variants.iter().flat_map(VariantDefData::fields_iter)
}
self.variants.iter().all(|v| v.fields.is_empty())
}
- pub fn variant_with_id(&self, vid: DefId) -> &VariantDefData<'tcx, 'container> {
+ pub fn variant_with_id(&self, vid: DefId) -> &VariantDefData<'gcx, 'container> {
self.variants
.iter()
.find(|v| v.did == vid)
.expect("variant_index_with_id: unknown variant")
}
- pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'tcx, 'container> {
+ pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'gcx, 'container> {
match def {
Def::Variant(_, vid) => self.variant_with_id(vid),
Def::Struct(..) | Def::TyAlias(..) => self.struct_variant(),
None => NoDtor,
}
}
+}
+impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'tcx, 'container> {
/// Returns a simpler type such that `Self: Sized` if and only
/// if that type is Sized, or `TyErr` if this type is recursive.
///
///
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer, e.g. issue #31299.
- pub fn sized_constraint(&self, tcx: &ty::TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
let dep_node = DepNode::SizedConstraint(self.did);
match self.sized_constraint.get(dep_node) {
None => {
- let this = tcx.lookup_adt_def_master(self.did);
- this.calculate_sized_constraint_inner(tcx, &mut Vec::new());
+ let global_tcx = tcx.global_tcx();
+ let this = global_tcx.lookup_adt_def_master(self.did);
+ this.calculate_sized_constraint_inner(global_tcx, &mut Vec::new());
self.sized_constraint(tcx)
}
Some(ty) => ty
}
}
-impl<'tcx> AdtDefData<'tcx, 'tcx> {
+impl<'a, 'tcx> AdtDefData<'tcx, 'tcx> {
/// Calculates the Sized-constraint.
///
/// As the Sized-constraint of enums can be a *set* of types,
/// such.
/// - a TyError, if a type contained itself. The representability
/// check should catch this case.
- fn calculate_sized_constraint_inner(&'tcx self, tcx: &ty::TyCtxt<'tcx>,
+ fn calculate_sized_constraint_inner(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
stack: &mut Vec<AdtDefMaster<'tcx>>)
{
fn sized_constraint_for_ty(
&'tcx self,
- tcx: &ty::TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
stack: &mut Vec<AdtDefMaster<'tcx>>,
ty: Ty<'tcx>
) -> Vec<Ty<'tcx>> {
}
}
-impl<'tcx, 'container> FieldDefData<'tcx, 'container> {
+impl<'a, 'gcx, 'tcx, 'container> FieldDefData<'tcx, 'container> {
pub fn new(did: DefId,
name: Name,
vis: Visibility) -> Self {
}
}
- pub fn ty(&self, tcx: &TyCtxt<'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> {
+ pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> {
self.unsubst_ty().subst(tcx, subst)
}
/// item into the monotype of an item reference.
#[derive(Clone)]
pub struct ItemSubsts<'tcx> {
- pub substs: Substs<'tcx>,
+ pub substs: &'tcx Substs<'tcx>,
}
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
FnOnce,
}
-impl ClosureKind {
- pub fn trait_did(&self, cx: &TyCtxt) -> DefId {
+impl<'a, 'tcx> ClosureKind {
+ pub fn trait_did(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> DefId {
let result = match *self {
- ClosureKind::Fn => cx.lang_items.require(FnTraitLangItem),
+ ClosureKind::Fn => tcx.lang_items.require(FnTraitLangItem),
ClosureKind::FnMut => {
- cx.lang_items.require(FnMutTraitLangItem)
+ tcx.lang_items.require(FnMutTraitLangItem)
}
ClosureKind::FnOnce => {
- cx.lang_items.require(FnOnceTraitLangItem)
+ tcx.lang_items.require(FnOnceTraitLangItem)
}
};
match result {
Ok(trait_did) => trait_did,
- Err(err) => cx.sess.fatal(&err[..]),
+ Err(err) => tcx.sess.fatal(&err[..]),
}
}
}
impl<'tcx> ItemSubsts<'tcx> {
- pub fn empty() -> ItemSubsts<'tcx> {
- ItemSubsts { substs: Substs::empty() }
- }
-
pub fn is_noop(&self) -> bool {
self.substs.is_noop()
}
}
/// Helper for looking things up in the various maps that are populated during
-/// typeck::collect (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc). All of
+/// typeck::collect (e.g., `tcx.impl_or_trait_items`, `tcx.tcache`, etc). All of
/// these share the pattern that if the id is local, it should have been loaded
/// into the map by the `typeck::collect` phase. If the def-id is external,
/// then we have to go consult the crate loading code (and cache the result for
}
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> {
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn node_id_to_type(self, id: NodeId) -> Ty<'gcx> {
match self.node_id_to_type_opt(id) {
Some(ty) => ty,
None => bug!("node_id_to_type: no type for node `{}`",
}
}
- pub fn node_id_to_type_opt(&self, id: NodeId) -> Option<Ty<'tcx>> {
+ pub fn node_id_to_type_opt(self, id: NodeId) -> Option<Ty<'gcx>> {
self.tables.borrow().node_types.get(&id).cloned()
}
- pub fn node_id_item_substs(&self, id: NodeId) -> ItemSubsts<'tcx> {
+ pub fn node_id_item_substs(self, id: NodeId) -> ItemSubsts<'gcx> {
match self.tables.borrow().item_substs.get(&id) {
- None => ItemSubsts::empty(),
+ None => ItemSubsts {
+ substs: self.global_tcx().mk_substs(Substs::empty())
+ },
Some(ts) => ts.clone(),
}
}
// Returns the type of a pattern as a monotype. Like @expr_ty, this function
// doesn't provide type parameter substitutions.
- pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> {
+ pub fn pat_ty(self, pat: &hir::Pat) -> Ty<'gcx> {
self.node_id_to_type(pat.id)
}
- pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option<Ty<'tcx>> {
+ pub fn pat_ty_opt(self, pat: &hir::Pat) -> Option<Ty<'gcx>> {
self.node_id_to_type_opt(pat.id)
}
// NB (2): This type doesn't provide type parameter substitutions; e.g. if you
// ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
// instead of "fn(ty) -> T with T = isize".
- pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> {
+ pub fn expr_ty(self, expr: &hir::Expr) -> Ty<'gcx> {
self.node_id_to_type(expr.id)
}
- pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
+ pub fn expr_ty_opt(self, expr: &hir::Expr) -> Option<Ty<'gcx>> {
self.node_id_to_type_opt(expr.id)
}
/// hard to do, I just hate that code so much I didn't want to touch it
/// unless it was to fix it properly, which seemed a distraction from the
/// thread at hand! -nmatsakis
- pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> {
+ pub fn expr_ty_adjusted(self, expr: &hir::Expr) -> Ty<'gcx> {
self.expr_ty(expr)
- .adjust(self, expr.span, expr.id,
+ .adjust(self.global_tcx(), expr.span, expr.id,
self.tables.borrow().adjustments.get(&expr.id),
|method_call| {
self.tables.borrow().method_map.get(&method_call).map(|method| method.ty)
})
}
- pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr) -> Option<Ty<'tcx>> {
- self.expr_ty_opt(expr).map(|t| t.adjust(self,
+ pub fn expr_ty_adjusted_opt(self, expr: &hir::Expr) -> Option<Ty<'gcx>> {
+ self.expr_ty_opt(expr).map(|t| t.adjust(self.global_tcx(),
expr.span,
expr.id,
self.tables.borrow().adjustments.get(&expr.id),
}))
}
- pub fn expr_span(&self, id: NodeId) -> Span {
+ pub fn expr_span(self, id: NodeId) -> Span {
match self.map.find(id) {
Some(ast_map::NodeExpr(e)) => {
e.span
}
}
- pub fn local_var_name_str(&self, id: NodeId) -> InternedString {
+ pub fn local_var_name_str(self, id: NodeId) -> InternedString {
match self.map.find(id) {
Some(ast_map::NodeLocal(pat)) => {
match pat.node {
}
}
- pub fn resolve_expr(&self, expr: &hir::Expr) -> Def {
+ pub fn resolve_expr(self, expr: &hir::Expr) -> Def {
match self.def_map.borrow().get(&expr.id) {
Some(def) => def.full_def(),
None => {
}
}
- pub fn expr_is_lval(&self, expr: &hir::Expr) -> bool {
+ pub fn expr_is_lval(self, expr: &hir::Expr) -> bool {
match expr.node {
hir::ExprPath(..) => {
// We can't use resolve_expr here, as this needs to run on broken
}
}
- pub fn provided_trait_methods(&self, id: DefId) -> Vec<Rc<Method<'tcx>>> {
+ pub fn provided_trait_methods(self, id: DefId) -> Vec<Rc<Method<'gcx>>> {
if let Some(id) = self.map.as_local_node_id(id) {
if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id).node {
ms.iter().filter_map(|ti| {
bug!("provided_trait_methods: `{:?}` is not a trait", id)
}
} else {
- self.sess.cstore.provided_trait_methods(self, id)
+ self.sess.cstore.provided_trait_methods(self.global_tcx(), id)
}
}
- pub fn associated_consts(&self, id: DefId) -> Vec<Rc<AssociatedConst<'tcx>>> {
+ pub fn associated_consts(self, id: DefId) -> Vec<Rc<AssociatedConst<'gcx>>> {
if let Some(id) = self.map.as_local_node_id(id) {
match self.map.expect_item(id).node {
ItemTrait(_, _, _, ref tis) => {
}
}
} else {
- self.sess.cstore.associated_consts(self, id)
+ self.sess.cstore.associated_consts(self.global_tcx(), id)
}
}
- pub fn trait_impl_polarity(&self, id: DefId) -> Option<hir::ImplPolarity> {
+ pub fn trait_impl_polarity(self, id: DefId) -> Option<hir::ImplPolarity> {
if let Some(id) = self.map.as_local_node_id(id) {
match self.map.find(id) {
Some(ast_map::NodeItem(item)) => {
}
}
- pub fn custom_coerce_unsized_kind(&self, did: DefId) -> adjustment::CustomCoerceUnsized {
+ pub fn custom_coerce_unsized_kind(self, did: DefId) -> adjustment::CustomCoerceUnsized {
self.custom_coerce_unsized_kinds.memoize(did, || {
let (kind, src) = if did.krate != LOCAL_CRATE {
(self.sess.cstore.custom_coerce_unsized_kind(did), "external")
})
}
- pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> {
+ pub fn impl_or_trait_item(self, id: DefId) -> ImplOrTraitItem<'gcx> {
lookup_locally_or_in_crate_store(
"impl_or_trait_items", id, &self.impl_or_trait_items,
- || self.sess.cstore.impl_or_trait_item(self, id)
+ || self.sess.cstore.impl_or_trait_item(self.global_tcx(), id)
.expect("missing ImplOrTraitItem in metadata"))
}
- pub fn trait_item_def_ids(&self, id: DefId) -> Rc<Vec<ImplOrTraitItemId>> {
+ pub fn trait_item_def_ids(self, id: DefId) -> Rc<Vec<ImplOrTraitItemId>> {
lookup_locally_or_in_crate_store(
"trait_item_def_ids", id, &self.trait_item_def_ids,
|| Rc::new(self.sess.cstore.trait_item_def_ids(id)))
/// Returns the trait-ref corresponding to a given impl, or None if it is
/// an inherent impl.
- pub fn impl_trait_ref(&self, id: DefId) -> Option<TraitRef<'tcx>> {
+ pub fn impl_trait_ref(self, id: DefId) -> Option<TraitRef<'gcx>> {
lookup_locally_or_in_crate_store(
"impl_trait_refs", id, &self.impl_trait_refs,
- || self.sess.cstore.impl_trait_ref(self, id))
+ || self.sess.cstore.impl_trait_ref(self.global_tcx(), id))
}
/// Returns whether this DefId refers to an impl
- pub fn is_impl(&self, id: DefId) -> bool {
+ pub fn is_impl(self, id: DefId) -> bool {
if let Some(id) = self.map.as_local_node_id(id) {
if let Some(ast_map::NodeItem(
&hir::Item { node: hir::ItemImpl(..), .. })) = self.map.find(id) {
}
}
- pub fn trait_ref_to_def_id(&self, tr: &hir::TraitRef) -> DefId {
+ pub fn trait_ref_to_def_id(self, tr: &hir::TraitRef) -> DefId {
self.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id()
}
- pub fn def_key(&self, id: DefId) -> ast_map::DefKey {
+ pub fn def_key(self, id: DefId) -> ast_map::DefKey {
if id.is_local() {
self.map.def_key(id)
} else {
/// Returns the `DefPath` of an item. Note that if `id` is not
/// local to this crate -- or is inlined into this crate -- the
/// result will be a non-local `DefPath`.
- pub fn def_path(&self, id: DefId) -> ast_map::DefPath {
+ pub fn def_path(self, id: DefId) -> ast_map::DefPath {
if id.is_local() {
self.map.def_path(id)
} else {
}
}
- pub fn item_name(&self, id: DefId) -> ast::Name {
+ pub fn item_name(self, id: DefId) -> ast::Name {
if let Some(id) = self.map.as_local_node_id(id) {
self.map.name(id)
} else {
}
// Register a given item type
- pub fn register_item_type(&self, did: DefId, ty: TypeScheme<'tcx>) {
+ pub fn register_item_type(self, did: DefId, ty: TypeScheme<'gcx>) {
self.tcache.borrow_mut().insert(did, ty);
}
// If the given item is in an external crate, looks up its type and adds it to
// the type cache. Returns the type parameters and type.
- pub fn lookup_item_type(&self, did: DefId) -> TypeScheme<'tcx> {
+ pub fn lookup_item_type(self, did: DefId) -> TypeScheme<'gcx> {
lookup_locally_or_in_crate_store(
"tcache", did, &self.tcache,
- || self.sess.cstore.item_type(self, did))
+ || self.sess.cstore.item_type(self.global_tcx(), did))
}
/// Given the did of a trait, returns its canonical trait ref.
- pub fn lookup_trait_def(&self, did: DefId) -> &'tcx TraitDef<'tcx> {
+ pub fn lookup_trait_def(self, did: DefId) -> &'gcx TraitDef<'gcx> {
lookup_locally_or_in_crate_store(
"trait_defs", did, &self.trait_defs,
- || self.alloc_trait_def(self.sess.cstore.trait_def(self, did))
+ || self.alloc_trait_def(self.sess.cstore.trait_def(self.global_tcx(), did))
)
}
/// Given the did of an ADT, return a master reference to its
/// definition. Unless you are planning on fulfilling the ADT's fields,
/// use lookup_adt_def instead.
- pub fn lookup_adt_def_master(&self, did: DefId) -> AdtDefMaster<'tcx> {
+ pub fn lookup_adt_def_master(self, did: DefId) -> AdtDefMaster<'gcx> {
lookup_locally_or_in_crate_store(
"adt_defs", did, &self.adt_defs,
- || self.sess.cstore.adt_def(self, did)
+ || self.sess.cstore.adt_def(self.global_tcx(), did)
)
}
/// Given the did of an ADT, return a reference to its definition.
- pub fn lookup_adt_def(&self, did: DefId) -> AdtDef<'tcx> {
+ pub fn lookup_adt_def(self, did: DefId) -> AdtDef<'gcx> {
// when reverse-variance goes away, a transmute::<AdtDefMaster,AdtDef>
// would be needed here.
self.lookup_adt_def_master(did)
}
/// Given the did of an item, returns its full set of predicates.
- pub fn lookup_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
+ pub fn lookup_predicates(self, did: DefId) -> GenericPredicates<'gcx> {
lookup_locally_or_in_crate_store(
"predicates", did, &self.predicates,
- || self.sess.cstore.item_predicates(self, did))
+ || self.sess.cstore.item_predicates(self.global_tcx(), did))
}
/// Given the did of a trait, returns its superpredicates.
- pub fn lookup_super_predicates(&self, did: DefId) -> GenericPredicates<'tcx> {
+ pub fn lookup_super_predicates(self, did: DefId) -> GenericPredicates<'gcx> {
lookup_locally_or_in_crate_store(
"super_predicates", did, &self.super_predicates,
- || self.sess.cstore.item_super_predicates(self, did))
+ || self.sess.cstore.item_super_predicates(self.global_tcx(), did))
}
/// If `type_needs_drop` returns true, then `ty` is definitely
///
/// (Note that this implies that if `ty` has a destructor attached,
/// then `type_needs_drop` will definitely return `true` for `ty`.)
- pub fn type_needs_drop_given_env<'a>(&self,
- ty: Ty<'tcx>,
- param_env: &ty::ParameterEnvironment<'a,'tcx>) -> bool {
+ pub fn type_needs_drop_given_env(self,
+ ty: Ty<'gcx>,
+ param_env: &ty::ParameterEnvironment<'gcx>) -> bool {
// Issue #22536: We first query type_moves_by_default. It sees a
// normalized version of the type, and therefore will definitely
// know whether the type implements Copy (and thus needs no
// cleanup/drop/zeroing) ...
- let implements_copy = !ty.moves_by_default(param_env, DUMMY_SP);
+ let tcx = self.global_tcx();
+ let implements_copy = !ty.moves_by_default(tcx, param_env, DUMMY_SP);
if implements_copy { return false; }
// bound attached (see above), it is sound to treat it as having a
// destructor (e.g. zero its memory on move).
- let contents = ty.type_contents(self);
+ let contents = ty.type_contents(tcx);
debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
- contents.needs_drop(self)
+ contents.needs_drop(tcx)
}
/// Get the attributes of a definition.
- pub fn get_attrs(&self, did: DefId) -> Cow<'tcx, [ast::Attribute]> {
+ pub fn get_attrs(self, did: DefId) -> Cow<'gcx, [ast::Attribute]> {
if let Some(id) = self.map.as_local_node_id(did) {
Cow::Borrowed(self.map.attrs(id))
} else {
}
/// Determine whether an item is annotated with an attribute
- pub fn has_attr(&self, did: DefId, attr: &str) -> bool {
+ pub fn has_attr(self, did: DefId, attr: &str) -> bool {
self.get_attrs(did).iter().any(|item| item.check_name(attr))
}
/// Determine whether an item is annotated with `#[repr(packed)]`
- pub fn lookup_packed(&self, did: DefId) -> bool {
+ pub fn lookup_packed(self, did: DefId) -> bool {
self.lookup_repr_hints(did).contains(&attr::ReprPacked)
}
/// Determine whether an item is annotated with `#[simd]`
- pub fn lookup_simd(&self, did: DefId) -> bool {
+ pub fn lookup_simd(self, did: DefId) -> bool {
self.has_attr(did, "simd")
|| self.lookup_repr_hints(did).contains(&attr::ReprSimd)
}
- pub fn item_variances(&self, item_id: DefId) -> Rc<ItemVariances> {
+ pub fn item_variances(self, item_id: DefId) -> Rc<ItemVariances> {
lookup_locally_or_in_crate_store(
"item_variance_map", item_id, &self.item_variance_map,
|| Rc::new(self.sess.cstore.item_variances(item_id)))
}
- pub fn trait_has_default_impl(&self, trait_def_id: DefId) -> bool {
+ pub fn trait_has_default_impl(self, trait_def_id: DefId) -> bool {
self.populate_implementations_for_trait_if_necessary(trait_def_id);
let def = self.lookup_trait_def(trait_def_id);
}
/// Records a trait-to-implementation mapping.
- pub fn record_trait_has_default_impl(&self, trait_def_id: DefId) {
+ pub fn record_trait_has_default_impl(self, trait_def_id: DefId) {
let def = self.lookup_trait_def(trait_def_id);
def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL)
}
/// Load primitive inherent implementations if necessary
- pub fn populate_implementations_for_primitive_if_necessary(&self,
+ pub fn populate_implementations_for_primitive_if_necessary(self,
primitive_def_id: DefId) {
if primitive_def_id.is_local() {
return
/// Populates the type context with all the inherent implementations for
/// the given type if necessary.
- pub fn populate_inherent_implementations_for_type_if_necessary(&self,
+ pub fn populate_inherent_implementations_for_type_if_necessary(self,
type_id: DefId) {
if type_id.is_local() {
return
/// Populates the type context with all the implementations for the given
/// trait if necessary.
- pub fn populate_implementations_for_trait_if_necessary(&self, trait_id: DefId) {
+ pub fn populate_implementations_for_trait_if_necessary(self, trait_id: DefId) {
if trait_id.is_local() {
return
}
def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID);
}
- pub fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind {
- Tables::closure_kind(&self.tables, self, def_id)
+ pub fn closure_kind(self, def_id: DefId) -> ty::ClosureKind {
+ // If this is a local def-id, it should be inserted into the
+        // tables by typeck; else, it will be retrieved from
+ // the external crate metadata.
+ if let Some(&kind) = self.tables.borrow().closure_kinds.get(&def_id) {
+ return kind;
+ }
+
+ let kind = self.sess.cstore.closure_kind(def_id);
+ self.tables.borrow_mut().closure_kinds.insert(def_id, kind);
+ kind
}
- pub fn closure_type(&self,
+ pub fn closure_type(self,
def_id: DefId,
- substs: &ClosureSubsts<'tcx>)
+ substs: ClosureSubsts<'tcx>)
-> ty::ClosureTy<'tcx>
{
- Tables::closure_type(&self.tables, self, def_id, substs)
+ // If this is a local def-id, it should be inserted into the
+        // tables by typeck; else, it will be retrieved from
+ // the external crate metadata.
+ if let Some(ty) = self.tables.borrow().closure_tys.get(&def_id) {
+ return ty.subst(self, substs.func_substs);
+ }
+
+ let ty = self.sess.cstore.closure_ty(self.global_tcx(), def_id);
+ self.tables.borrow_mut().closure_tys.insert(def_id, ty.clone());
+ ty.subst(self, substs.func_substs)
}
/// Given the def_id of an impl, return the def_id of the trait it implements.
/// If it implements no trait, return `None`.
- pub fn trait_id_of_impl(&self, def_id: DefId) -> Option<DefId> {
+ pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
self.impl_trait_ref(def_id).map(|tr| tr.def_id)
}
/// If the given def ID describes a method belonging to an impl, return the
/// ID of the impl that the method belongs to. Otherwise, return `None`.
- pub fn impl_of_method(&self, def_id: DefId) -> Option<DefId> {
+ pub fn impl_of_method(self, def_id: DefId) -> Option<DefId> {
if def_id.krate != LOCAL_CRATE {
- return self.sess.cstore.impl_or_trait_item(self, def_id).and_then(|item| {
+ return self.sess.cstore.impl_or_trait_item(self.global_tcx(), def_id)
+ .and_then(|item| {
match item.container() {
TraitContainer(_) => None,
ImplContainer(def_id) => Some(def_id),
/// If the given def ID describes an item belonging to a trait (either a
/// default method or an implementation of a trait method), return the ID of
/// the trait that the method belongs to. Otherwise, return `None`.
- pub fn trait_of_item(&self, def_id: DefId) -> Option<DefId> {
+ pub fn trait_of_item(self, def_id: DefId) -> Option<DefId> {
if def_id.krate != LOCAL_CRATE {
- return self.sess.cstore.trait_of_item(self, def_id);
+ return self.sess.cstore.trait_of_item(self.global_tcx(), def_id);
}
match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
Some(impl_or_trait_item) => {
/// is already that of the original trait method, then the return value is
/// the same).
/// Otherwise, return `None`.
- pub fn trait_item_of_item(&self, def_id: DefId) -> Option<ImplOrTraitItemId> {
+ pub fn trait_item_of_item(self, def_id: DefId) -> Option<ImplOrTraitItemId> {
let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) {
Some(m) => m.clone(),
None => return None,
/// Construct a parameter environment suitable for static contexts or other contexts where there
/// are no free type/lifetime parameters in scope.
- pub fn empty_parameter_environment<'a>(&'a self)
- -> ParameterEnvironment<'a,'tcx> {
+ pub fn empty_parameter_environment(self) -> ParameterEnvironment<'tcx> {
// for an empty parameter environment, there ARE no free
// regions, so it shouldn't matter what we use for the free id
let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID);
- ty::ParameterEnvironment { tcx: self,
- free_substs: Substs::empty(),
- caller_bounds: Vec::new(),
- implicit_region_bound: ty::ReEmpty,
- selection_cache: traits::SelectionCache::new(),
- evaluation_cache: traits::EvaluationCache::new(),
- free_id_outlive: free_id_outlive }
+ ty::ParameterEnvironment {
+ free_substs: self.mk_substs(Substs::empty()),
+ caller_bounds: Vec::new(),
+ implicit_region_bound: ty::ReEmpty,
+ free_id_outlive: free_id_outlive
+ }
}
/// Constructs and returns a substitution that can be applied to move from
/// In general, this means converting from bound parameters to
/// free parameters. Since we currently represent bound/free type
/// parameters in the same way, this only has an effect on regions.
- pub fn construct_free_substs(&self, generics: &Generics<'tcx>,
- free_id_outlive: CodeExtent) -> Substs<'tcx> {
+ pub fn construct_free_substs(self, generics: &Generics<'gcx>,
+ free_id_outlive: CodeExtent) -> Substs<'gcx> {
// map T => T
let mut types = VecPerParamSpace::empty();
for def in generics.types.as_slice() {
debug!("construct_parameter_environment(): push_types_from_defs: def={:?}",
def);
- types.push(def.space, self.mk_param_from_def(def));
+ types.push(def.space, self.global_tcx().mk_param_from_def(def));
}
// map bound 'a => free 'a
/// See `ParameterEnvironment` struct def'n for details.
/// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)`
/// for the `free_id_outlive` parameter. (But note that that is not always quite right.)
- pub fn construct_parameter_environment<'a>(&'a self,
- span: Span,
- generics: &ty::Generics<'tcx>,
- generic_predicates: &ty::GenericPredicates<'tcx>,
- free_id_outlive: CodeExtent)
- -> ParameterEnvironment<'a, 'tcx>
+ pub fn construct_parameter_environment(self,
+ span: Span,
+ generics: &ty::Generics<'gcx>,
+ generic_predicates: &ty::GenericPredicates<'gcx>,
+ free_id_outlive: CodeExtent)
+ -> ParameterEnvironment<'gcx>
{
//
// Construct the free substs.
// Compute the bounds on Self and the type parameters.
//
- let bounds = generic_predicates.instantiate(self, &free_substs);
- let bounds = self.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
+ let tcx = self.global_tcx();
+ let bounds = generic_predicates.instantiate(tcx, &free_substs);
+ let bounds = tcx.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
let predicates = bounds.predicates.into_vec();
// Finally, we have to normalize the bounds in the environment, in
//
let unnormalized_env = ty::ParameterEnvironment {
- tcx: self,
- free_substs: free_substs,
+ free_substs: tcx.mk_substs(free_substs),
implicit_region_bound: ty::ReScope(free_id_outlive),
caller_bounds: predicates,
- selection_cache: traits::SelectionCache::new(),
- evaluation_cache: traits::EvaluationCache::new(),
free_id_outlive: free_id_outlive,
};
let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps));
- traits::normalize_param_env_or_error(unnormalized_env, cause)
+ traits::normalize_param_env_or_error(tcx, unnormalized_env, cause)
}
- pub fn is_method_call(&self, expr_id: NodeId) -> bool {
+ pub fn is_method_call(self, expr_id: NodeId) -> bool {
self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id))
}
- pub fn is_overloaded_autoderef(&self, expr_id: NodeId, autoderefs: u32) -> bool {
+ pub fn is_overloaded_autoderef(self, expr_id: NodeId, autoderefs: u32) -> bool {
self.tables.borrow().method_map.contains_key(&MethodCall::autoderef(expr_id,
autoderefs))
}
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+ pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone())
}
- pub fn visit_all_items_in_krate<V,F>(&self,
+ pub fn visit_all_items_in_krate<V,F>(self,
dep_node_fn: F,
visitor: &mut V)
- where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'tcx>
+ where F: FnMut(DefId) -> DepNode<DefId>, V: Visitor<'gcx>
{
- dep_graph::visit_all_items_in_krate(self, dep_node_fn, visitor);
+ dep_graph::visit_all_items_in_krate(self.global_tcx(), dep_node_fn, visitor);
}
/// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err`
/// with the name of the crate containing the impl.
- pub fn span_of_impl(&self, impl_did: DefId) -> Result<Span, InternedString> {
+ pub fn span_of_impl(self, impl_did: DefId) -> Result<Span, InternedString> {
if impl_did.is_local() {
let node_id = self.map.as_local_node_id(impl_did).unwrap();
Ok(self.map.span(node_id))
ByBox,
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn with_freevars<T, F>(&self, fid: NodeId, f: F) -> T where
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn with_freevars<T, F>(self, fid: NodeId, f: F) -> T where
F: FnOnce(&[hir::Freevar]) -> T,
{
match self.freevars.borrow().get(&fid) {
EscapingProjection(Vec<Component<'tcx>>),
}
-/// Returns all the things that must outlive `'a` for the condition
-/// `ty0: 'a` to hold.
-pub fn components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- ty0: Ty<'tcx>)
- -> Vec<Component<'tcx>> {
- let mut components = vec![];
- compute_components(infcx, ty0, &mut components);
- debug!("components({:?}) = {:?}", ty0, components);
- components
-}
+impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
+ /// Returns all the things that must outlive `'a` for the condition
+ /// `ty0: 'a` to hold.
+ pub fn outlives_components(&self, ty0: Ty<'tcx>)
+ -> Vec<Component<'tcx>> {
+ let mut components = vec![];
+ self.compute_components(ty0, &mut components);
+ debug!("components({:?}) = {:?}", ty0, components);
+ components
+ }
-fn compute_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- ty: Ty<'tcx>,
- out: &mut Vec<Component<'tcx>>) {
- // Descend through the types, looking for the various "base"
- // components and collecting them into `out`. This is not written
- // with `collect()` because of the need to sometimes skip subtrees
- // in the `subtys` iterator (e.g., when encountering a
- // projection).
- match ty.sty {
- ty::TyClosure(_, ref substs) => {
- // FIXME(#27086). We do not accumulate from substs, since they
- // don't represent reachable data. This means that, in
- // practice, some of the lifetime parameters might not
- // be in scope when the body runs, so long as there is
- // no reachable data with that lifetime. For better or
- // worse, this is consistent with fn types, however,
- // which can also encapsulate data in this fashion
- // (though it's somewhat harder, and typically
- // requires virtual dispatch).
- //
- // Note that changing this (in a naive way, at least)
- // causes regressions for what appears to be perfectly
- // reasonable code like this:
- //
- // ```
- // fn foo<'a>(p: &Data<'a>) {
- // bar(|q: &mut Parser| q.read_addr())
- // }
- // fn bar(p: Box<FnMut(&mut Parser)+'static>) {
- // }
- // ```
- //
- // Note that `p` (and `'a`) are not used in the
- // closure at all, but to meet the requirement that
- // the closure type `C: 'static` (so it can be coerced
- // to the object type), we get the requirement that
- // `'a: 'static` since `'a` appears in the closure
- // type `C`.
- //
- // A smarter fix might "prune" unused `func_substs` --
- // this would avoid breaking simple examples like
- // this, but would still break others (which might
- // indeed be invalid, depending on your POV). Pruning
- // would be a subtle process, since we have to see
- // what func/type parameters are used and unused,
- // taking into consideration UFCS and so forth.
+ fn compute_components(&self, ty: Ty<'tcx>, out: &mut Vec<Component<'tcx>>) {
+ // Descend through the types, looking for the various "base"
+ // components and collecting them into `out`. This is not written
+ // with `collect()` because of the need to sometimes skip subtrees
+ // in the `subtys` iterator (e.g., when encountering a
+ // projection).
+ match ty.sty {
+ ty::TyClosure(_, ref substs) => {
+ // FIXME(#27086). We do not accumulate from substs, since they
+ // don't represent reachable data. This means that, in
+ // practice, some of the lifetime parameters might not
+ // be in scope when the body runs, so long as there is
+ // no reachable data with that lifetime. For better or
+ // worse, this is consistent with fn types, however,
+ // which can also encapsulate data in this fashion
+ // (though it's somewhat harder, and typically
+ // requires virtual dispatch).
+ //
+ // Note that changing this (in a naive way, at least)
+ // causes regressions for what appears to be perfectly
+ // reasonable code like this:
+ //
+ // ```
+ // fn foo<'a>(p: &Data<'a>) {
+ // bar(|q: &mut Parser| q.read_addr())
+ // }
+ // fn bar(p: Box<FnMut(&mut Parser)+'static>) {
+ // }
+ // ```
+ //
+ // Note that `p` (and `'a`) are not used in the
+ // closure at all, but to meet the requirement that
+ // the closure type `C: 'static` (so it can be coerced
+ // to the object type), we get the requirement that
+ // `'a: 'static` since `'a` appears in the closure
+ // type `C`.
+ //
+ // A smarter fix might "prune" unused `func_substs` --
+ // this would avoid breaking simple examples like
+ // this, but would still break others (which might
+ // indeed be invalid, depending on your POV). Pruning
+ // would be a subtle process, since we have to see
+ // what func/type parameters are used and unused,
+ // taking into consideration UFCS and so forth.
- for &upvar_ty in &substs.upvar_tys {
- compute_components(infcx, upvar_ty, out);
+ for &upvar_ty in substs.upvar_tys {
+ self.compute_components(upvar_ty, out);
+ }
}
- }
- // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
- // is implied by the environment is done in regionck.
- ty::TyParam(p) => {
- out.push(Component::Param(p));
- }
+ // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
+ // is implied by the environment is done in regionck.
+ ty::TyParam(p) => {
+ out.push(Component::Param(p));
+ }
- // For projections, we prefer to generate an obligation like
- // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
- // regionck more ways to prove that it holds. However,
- // regionck is not (at least currently) prepared to deal with
- // higher-ranked regions that may appear in the
- // trait-ref. Therefore, if we see any higher-ranke regions,
- // we simply fallback to the most restrictive rule, which
- // requires that `Pi: 'a` for all `i`.
- ty::TyProjection(ref data) => {
- if !data.has_escaping_regions() {
- // best case: no escaping regions, so push the
- // projection and skip the subtree (thus generating no
- // constraints for Pi). This defers the choice between
- // the rules OutlivesProjectionEnv,
- // OutlivesProjectionTraitDef, and
- // OutlivesProjectionComponents to regionck.
- out.push(Component::Projection(*data));
- } else {
- // fallback case: hard code
- // OutlivesProjectionComponents. Continue walking
- // through and constrain Pi.
- let subcomponents = capture_components(infcx, ty);
- out.push(Component::EscapingProjection(subcomponents));
+ // For projections, we prefer to generate an obligation like
+ // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
+ // regionck more ways to prove that it holds. However,
+ // regionck is not (at least currently) prepared to deal with
+ // higher-ranked regions that may appear in the
+ // trait-ref. Therefore, if we see any higher-ranked regions,
+ // we simply fall back to the most restrictive rule, which
+ // requires that `Pi: 'a` for all `i`.
+ ty::TyProjection(ref data) => {
+ if !data.has_escaping_regions() {
+ // best case: no escaping regions, so push the
+ // projection and skip the subtree (thus generating no
+ // constraints for Pi). This defers the choice between
+ // the rules OutlivesProjectionEnv,
+ // OutlivesProjectionTraitDef, and
+ // OutlivesProjectionComponents to regionck.
+ out.push(Component::Projection(*data));
+ } else {
+ // fallback case: hard code
+ // OutlivesProjectionComponents. Continue walking
+ // through and constrain Pi.
+ let subcomponents = self.capture_components(ty);
+ out.push(Component::EscapingProjection(subcomponents));
+ }
}
- }
- // If we encounter an inference variable, try to resolve it
- // and proceed with resolved version. If we cannot resolve it,
- // then record the unresolved variable as a component.
- ty::TyInfer(_) => {
- let ty = infcx.resolve_type_vars_if_possible(&ty);
- if let ty::TyInfer(infer_ty) = ty.sty {
- out.push(Component::UnresolvedInferenceVariable(infer_ty));
- } else {
- compute_components(infcx, ty, out);
+ // If we encounter an inference variable, try to resolve it
+ // and proceed with resolved version. If we cannot resolve it,
+ // then record the unresolved variable as a component.
+ ty::TyInfer(_) => {
+ let ty = self.resolve_type_vars_if_possible(&ty);
+ if let ty::TyInfer(infer_ty) = ty.sty {
+ out.push(Component::UnresolvedInferenceVariable(infer_ty));
+ } else {
+ self.compute_components(ty, out);
+ }
}
- }
- // Most types do not introduce any region binders, nor
- // involve any other subtle cases, and so the WF relation
- // simply constraints any regions referenced directly by
- // the type and then visits the types that are lexically
- // contained within. (The comments refer to relevant rules
- // from RFC1214.)
- ty::TyBool | // OutlivesScalar
- ty::TyChar | // OutlivesScalar
- ty::TyInt(..) | // OutlivesScalar
- ty::TyUint(..) | // OutlivesScalar
- ty::TyFloat(..) | // OutlivesScalar
- ty::TyEnum(..) | // OutlivesNominalType
- ty::TyStruct(..) | // OutlivesNominalType
- ty::TyBox(..) | // OutlivesNominalType (ish)
- ty::TyStr | // OutlivesScalar (ish)
- ty::TyArray(..) | // ...
- ty::TySlice(..) | // ...
- ty::TyRawPtr(..) | // ...
- ty::TyRef(..) | // OutlivesReference
- ty::TyTuple(..) | // ...
- ty::TyFnDef(..) | // OutlivesFunction (*)
- ty::TyFnPtr(_) | // OutlivesFunction (*)
- ty::TyTrait(..) | // OutlivesObject, OutlivesFragment (*)
- ty::TyError => {
- // (*) Bare functions and traits are both binders. In the
- // RFC, this means we would add the bound regions to the
- // "bound regions list". In our representation, no such
- // list is maintained explicitly, because bound regions
- // themselves can be readily identified.
+ // Most types do not introduce any region binders, nor
+ // involve any other subtle cases, and so the WF relation
+ // simply constrains any regions referenced directly by
+ // the type and then visits the types that are lexically
+ // contained within. (The comments refer to relevant rules
+ // from RFC1214.)
+ ty::TyBool | // OutlivesScalar
+ ty::TyChar | // OutlivesScalar
+ ty::TyInt(..) | // OutlivesScalar
+ ty::TyUint(..) | // OutlivesScalar
+ ty::TyFloat(..) | // OutlivesScalar
+ ty::TyEnum(..) | // OutlivesNominalType
+ ty::TyStruct(..) | // OutlivesNominalType
+ ty::TyBox(..) | // OutlivesNominalType (ish)
+ ty::TyStr | // OutlivesScalar (ish)
+ ty::TyArray(..) | // ...
+ ty::TySlice(..) | // ...
+ ty::TyRawPtr(..) | // ...
+ ty::TyRef(..) | // OutlivesReference
+ ty::TyTuple(..) | // ...
+ ty::TyFnDef(..) | // OutlivesFunction (*)
+ ty::TyFnPtr(_) | // OutlivesFunction (*)
+ ty::TyTrait(..) | // OutlivesObject, OutlivesFragment (*)
+ ty::TyError => {
+ // (*) Bare functions and traits are both binders. In the
+ // RFC, this means we would add the bound regions to the
+ // "bound regions list". In our representation, no such
+ // list is maintained explicitly, because bound regions
+ // themselves can be readily identified.
- push_region_constraints(out, ty.regions());
- for subty in ty.walk_shallow() {
- compute_components(infcx, subty, out);
+ push_region_constraints(out, ty.regions());
+ for subty in ty.walk_shallow() {
+ self.compute_components(subty, out);
+ }
}
}
}
-}
-fn capture_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
- ty: Ty<'tcx>)
- -> Vec<Component<'tcx>> {
- let mut temp = vec![];
- push_region_constraints(&mut temp, ty.regions());
- for subty in ty.walk_shallow() {
- compute_components(infcx, subty, &mut temp);
+ fn capture_components(&self, ty: Ty<'tcx>) -> Vec<Component<'tcx>> {
+ let mut temp = vec![];
+ push_region_constraints(&mut temp, ty.regions());
+ for subty in ty.walk_shallow() {
+ self.compute_components(subty, &mut temp);
+ }
+ temp
}
- temp
}
fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region>) {
ExistentialRegionBound, // relating an existential region bound
}
-pub trait TypeRelation<'a,'tcx> : Sized {
- fn tcx(&self) -> &'a TyCtxt<'tcx>;
+pub trait TypeRelation<'a, 'gcx: 'a+'tcx, 'tcx: 'a> : Sized {
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx>;
/// Returns a static string we can use for printouts.
fn tag(&self) -> &'static str;
}
/// Generic relation routine suitable for most anything.
- fn relate<T:Relate<'a,'tcx>>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> {
+ fn relate<T: Relate<'tcx>>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> {
Relate::relate(self, a, b)
}
/// Relete elements of two slices pairwise.
- fn relate_zip<T:Relate<'a,'tcx>>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec<T>> {
+ fn relate_zip<T: Relate<'tcx>>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec<T>> {
assert_eq!(a.len(), b.len());
a.iter().zip(b).map(|(a, b)| self.relate(a, b)).collect()
}
/// Switch variance for the purpose of relating `a` and `b`.
- fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>;
+ fn relate_with_variance<T: Relate<'tcx>>(&mut self,
+ variance: ty::Variance,
+ a: &T,
+ b: &T)
+ -> RelateResult<'tcx, T>;
// Overrideable relations. You shouldn't typically call these
// directly, instead call `relate()`, which in turn calls
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'a,'tcx>;
+ where T: Relate<'tcx>;
}
-pub trait Relate<'a,'tcx>: TypeFoldable<'tcx> {
- fn relate<R:TypeRelation<'a,'tcx>>(relation: &mut R,
- a: &Self,
- b: &Self)
- -> RelateResult<'tcx, Self>;
+pub trait Relate<'tcx>: TypeFoldable<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R, a: &Self, b: &Self)
+ -> RelateResult<'tcx, Self>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a;
}
///////////////////////////////////////////////////////////////////////////
// Relate impls
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TypeAndMut<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::TypeAndMut<'tcx>,
- b: &ty::TypeAndMut<'tcx>)
- -> RelateResult<'tcx, ty::TypeAndMut<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::TypeAndMut<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::TypeAndMut<'tcx>,
+ b: &ty::TypeAndMut<'tcx>)
+ -> RelateResult<'tcx, ty::TypeAndMut<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
debug!("{}.mts({:?}, {:?})",
relation.tag(),
// substitutions are not themselves relatable without more context,
// but they is an important subroutine for things that ARE relatable,
// like traits etc.
-fn relate_item_substs<'a,'tcx:'a,R>(relation: &mut R,
- item_def_id: DefId,
- a_subst: &Substs<'tcx>,
- b_subst: &Substs<'tcx>)
- -> RelateResult<'tcx, Substs<'tcx>>
- where R: TypeRelation<'a,'tcx>
+fn relate_item_substs<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ item_def_id: DefId,
+ a_subst: &'tcx Substs<'tcx>,
+ b_subst: &'tcx Substs<'tcx>)
+ -> RelateResult<'tcx, &'tcx Substs<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
debug!("substs: item_def_id={:?} a_subst={:?} b_subst={:?}",
item_def_id,
relate_substs(relation, opt_variances, a_subst, b_subst)
}
-pub fn relate_substs<'a,'tcx:'a,R>(relation: &mut R,
- variances: Option<&ty::ItemVariances>,
- a_subst: &Substs<'tcx>,
- b_subst: &Substs<'tcx>)
- -> RelateResult<'tcx, Substs<'tcx>>
- where R: TypeRelation<'a,'tcx>
+pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ variances: Option<&ty::ItemVariances>,
+ a_subst: &'tcx Substs<'tcx>,
+ b_subst: &'tcx Substs<'tcx>)
+ -> RelateResult<'tcx, &'tcx Substs<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let mut substs = Substs::empty();
substs.regions.replace(space, regions);
}
- Ok(substs)
+ Ok(relation.tcx().mk_substs(substs))
}
-fn relate_type_params<'a,'tcx:'a,R>(relation: &mut R,
- variances: Option<&[ty::Variance]>,
- a_tys: &[Ty<'tcx>],
- b_tys: &[Ty<'tcx>])
- -> RelateResult<'tcx, Vec<Ty<'tcx>>>
- where R: TypeRelation<'a,'tcx>
+fn relate_type_params<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ variances: Option<&[ty::Variance]>,
+ a_tys: &[Ty<'tcx>],
+ b_tys: &[Ty<'tcx>])
+ -> RelateResult<'tcx, Vec<Ty<'tcx>>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a_tys.len() != b_tys.len() {
return Err(TypeError::TyParamSize(expected_found(relation,
.collect()
}
-fn relate_region_params<'a,'tcx:'a,R>(relation: &mut R,
- variances: Option<&[ty::Variance]>,
- a_rs: &[ty::Region],
- b_rs: &[ty::Region])
- -> RelateResult<'tcx, Vec<ty::Region>>
- where R: TypeRelation<'a,'tcx>
+fn relate_region_params<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ variances: Option<&[ty::Variance]>,
+ a_rs: &[ty::Region],
+ b_rs: &[ty::Region])
+ -> RelateResult<'tcx, Vec<ty::Region>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let num_region_params = a_rs.len();
.collect()
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BareFnTy<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::BareFnTy<'tcx>,
- b: &ty::BareFnTy<'tcx>)
- -> RelateResult<'tcx, ty::BareFnTy<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for &'tcx ty::BareFnTy<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &&'tcx ty::BareFnTy<'tcx>,
+ b: &&'tcx ty::BareFnTy<'tcx>)
+ -> RelateResult<'tcx, &'tcx ty::BareFnTy<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let unsafety = relation.relate(&a.unsafety, &b.unsafety)?;
let abi = relation.relate(&a.abi, &b.abi)?;
let sig = relation.relate(&a.sig, &b.sig)?;
- Ok(ty::BareFnTy {unsafety: unsafety,
- abi: abi,
- sig: sig})
+ Ok(relation.tcx().mk_bare_fn(ty::BareFnTy {
+ unsafety: unsafety,
+ abi: abi,
+ sig: sig
+ }))
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::FnSig<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::FnSig<'tcx>,
- b: &ty::FnSig<'tcx>)
- -> RelateResult<'tcx, ty::FnSig<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::FnSig<'tcx>,
+ b: &ty::FnSig<'tcx>)
+ -> RelateResult<'tcx, ty::FnSig<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a.variadic != b.variadic {
return Err(TypeError::VariadicMismatch(
}
}
-fn relate_arg_vecs<'a,'tcx:'a,R>(relation: &mut R,
- a_args: &[Ty<'tcx>],
- b_args: &[Ty<'tcx>])
- -> RelateResult<'tcx, Vec<Ty<'tcx>>>
- where R: TypeRelation<'a,'tcx>
+fn relate_arg_vecs<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ a_args: &[Ty<'tcx>],
+ b_args: &[Ty<'tcx>])
+ -> RelateResult<'tcx, Vec<Ty<'tcx>>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a_args.len() != b_args.len() {
return Err(TypeError::ArgCount);
.collect()
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ast::Unsafety {
- fn relate<R>(relation: &mut R,
- a: &ast::Unsafety,
- b: &ast::Unsafety)
- -> RelateResult<'tcx, ast::Unsafety>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ast::Unsafety {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ast::Unsafety,
+ b: &ast::Unsafety)
+ -> RelateResult<'tcx, ast::Unsafety>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a != b {
Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for abi::Abi {
- fn relate<R>(relation: &mut R,
- a: &abi::Abi,
- b: &abi::Abi)
- -> RelateResult<'tcx, abi::Abi>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for abi::Abi {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &abi::Abi,
+ b: &abi::Abi)
+ -> RelateResult<'tcx, abi::Abi>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a == b {
Ok(*a)
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionTy<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::ProjectionTy<'tcx>,
- b: &ty::ProjectionTy<'tcx>)
- -> RelateResult<'tcx, ty::ProjectionTy<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::ProjectionTy<'tcx>,
+ b: &ty::ProjectionTy<'tcx>)
+ -> RelateResult<'tcx, ty::ProjectionTy<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
if a.item_name != b.item_name {
Err(TypeError::ProjectionNameMismatched(
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionPredicate<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::ProjectionPredicate<'tcx>,
- b: &ty::ProjectionPredicate<'tcx>)
- -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::ProjectionPredicate<'tcx>,
+ b: &ty::ProjectionPredicate<'tcx>)
+ -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let projection_ty = relation.relate(&a.projection_ty, &b.projection_ty)?;
let ty = relation.relate(&a.ty, &b.ty)?;
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for Vec<ty::PolyProjectionPredicate<'tcx>> {
- fn relate<R>(relation: &mut R,
- a: &Vec<ty::PolyProjectionPredicate<'tcx>>,
- b: &Vec<ty::PolyProjectionPredicate<'tcx>>)
- -> RelateResult<'tcx, Vec<ty::PolyProjectionPredicate<'tcx>>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for Vec<ty::PolyProjectionPredicate<'tcx>> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &Vec<ty::PolyProjectionPredicate<'tcx>>,
+ b: &Vec<ty::PolyProjectionPredicate<'tcx>>)
+ -> RelateResult<'tcx, Vec<ty::PolyProjectionPredicate<'tcx>>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
// To be compatible, `a` and `b` must be for precisely the
// same set of traits and item names. We always require that
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ExistentialBounds<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::ExistentialBounds<'tcx>,
- b: &ty::ExistentialBounds<'tcx>)
- -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::ExistentialBounds<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::ExistentialBounds<'tcx>,
+ b: &ty::ExistentialBounds<'tcx>)
+ -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let r =
relation.with_cause(
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BuiltinBounds {
- fn relate<R>(relation: &mut R,
- a: &ty::BuiltinBounds,
- b: &ty::BuiltinBounds)
- -> RelateResult<'tcx, ty::BuiltinBounds>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::BuiltinBounds {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::BuiltinBounds,
+ b: &ty::BuiltinBounds)
+ -> RelateResult<'tcx, ty::BuiltinBounds>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
// Two sets of builtin bounds are only relatable if they are
// precisely the same (but see the coercion code).
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TraitRef<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::TraitRef<'tcx>,
- b: &ty::TraitRef<'tcx>)
- -> RelateResult<'tcx, ty::TraitRef<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::TraitRef<'tcx>,
+ b: &ty::TraitRef<'tcx>)
+ -> RelateResult<'tcx, ty::TraitRef<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
// Different traits cannot be related
if a.def_id != b.def_id {
Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id)))
} else {
let substs = relate_item_substs(relation, a.def_id, a.substs, b.substs)?;
- Ok(ty::TraitRef { def_id: a.def_id, substs: relation.tcx().mk_substs(substs) })
+ Ok(ty::TraitRef { def_id: a.def_id, substs: substs })
}
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for Ty<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &Ty<'tcx>,
- b: &Ty<'tcx>)
- -> RelateResult<'tcx, Ty<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for Ty<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &Ty<'tcx>,
+ b: &Ty<'tcx>)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
relation.tys(a, b)
}
/// The main "type relation" routine. Note that this does not handle
/// inference artifacts, so you should filter those out before calling
/// it.
-pub fn super_relate_tys<'a,'tcx:'a,R>(relation: &mut R,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> RelateResult<'tcx, Ty<'tcx>>
- where R: TypeRelation<'a,'tcx>
+pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let tcx = relation.tcx();
let a_sty = &a.sty;
if a_def == b_def =>
{
let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?;
- Ok(tcx.mk_enum(a_def, tcx.mk_substs(substs)))
+ Ok(tcx.mk_enum(a_def, substs))
}
(&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) =>
if a_def == b_def =>
{
let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?;
- Ok(tcx.mk_struct(a_def, tcx.mk_substs(substs)))
+ Ok(tcx.mk_struct(a_def, substs))
}
- (&ty::TyClosure(a_id, ref a_substs),
- &ty::TyClosure(b_id, ref b_substs))
+ (&ty::TyClosure(a_id, a_substs),
+ &ty::TyClosure(b_id, b_substs))
if a_id == b_id =>
{
// All TyClosure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
- let substs = relation.relate(a_substs, b_substs)?;
+ let substs = relation.relate(&a_substs, &b_substs)?;
Ok(tcx.mk_closure_from_closure_substs(a_id, substs))
}
Ok(tcx.mk_slice(t))
}
- (&ty::TyTuple(ref as_), &ty::TyTuple(ref bs)) =>
+ (&ty::TyTuple(as_), &ty::TyTuple(bs)) =>
{
if as_.len() == bs.len() {
let ts = as_.iter().zip(bs)
if a_def_id == b_def_id =>
{
let substs = relate_substs(relation, None, a_substs, b_substs)?;
- let fty = relation.relate(a_fty, b_fty)?;
- Ok(tcx.mk_fn_def(a_def_id, tcx.mk_substs(substs), fty))
+ let fty = relation.relate(&a_fty, &b_fty)?;
+ Ok(tcx.mk_fn_def(a_def_id, substs, fty))
}
(&ty::TyFnPtr(a_fty), &ty::TyFnPtr(b_fty)) =>
{
- let fty = relation.relate(a_fty, b_fty)?;
+ let fty = relation.relate(&a_fty, &b_fty)?;
Ok(tcx.mk_fn_ptr(fty))
}
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ClosureSubsts<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &ty::ClosureSubsts<'tcx>,
- b: &ty::ClosureSubsts<'tcx>)
- -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::ClosureSubsts<'tcx>,
+ b: &ty::ClosureSubsts<'tcx>)
+ -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
- let func_substs = relate_substs(relation, None, a.func_substs, b.func_substs)?;
+ let substs = relate_substs(relation, None, a.func_substs, b.func_substs)?;
let upvar_tys = relation.relate_zip(&a.upvar_tys, &b.upvar_tys)?;
- Ok(ty::ClosureSubsts { func_substs: relation.tcx().mk_substs(func_substs),
- upvar_tys: upvar_tys })
+ Ok(ty::ClosureSubsts {
+ func_substs: substs,
+ upvar_tys: relation.tcx().mk_type_list(upvar_tys)
+ })
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for Substs<'tcx> {
- fn relate<R>(relation: &mut R,
- a: &Substs<'tcx>,
- b: &Substs<'tcx>)
- -> RelateResult<'tcx, Substs<'tcx>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for &'tcx Substs<'tcx> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &&'tcx Substs<'tcx>,
+ b: &&'tcx Substs<'tcx>)
+ -> RelateResult<'tcx, &'tcx Substs<'tcx>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
relate_substs(relation, None, a, b)
}
}
-impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::Region {
- fn relate<R>(relation: &mut R,
- a: &ty::Region,
- b: &ty::Region)
- -> RelateResult<'tcx, ty::Region>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx> Relate<'tcx> for ty::Region {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::Region,
+ b: &ty::Region)
+ -> RelateResult<'tcx, ty::Region>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
relation.regions(*a, *b)
}
}
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for ty::Binder<T>
- where T: Relate<'a,'tcx>
-{
- fn relate<R>(relation: &mut R,
- a: &ty::Binder<T>,
- b: &ty::Binder<T>)
- -> RelateResult<'tcx, ty::Binder<T>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for ty::Binder<T> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &ty::Binder<T>,
+ b: &ty::Binder<T>)
+ -> RelateResult<'tcx, ty::Binder<T>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
relation.binders(a, b)
}
}
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Rc<T>
- where T: Relate<'a,'tcx>
-{
- fn relate<R>(relation: &mut R,
- a: &Rc<T>,
- b: &Rc<T>)
- -> RelateResult<'tcx, Rc<T>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for Rc<T> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &Rc<T>,
+ b: &Rc<T>)
+ -> RelateResult<'tcx, Rc<T>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let a: &T = a;
let b: &T = b;
}
}
-impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Box<T>
- where T: Relate<'a,'tcx>
-{
- fn relate<R>(relation: &mut R,
- a: &Box<T>,
- b: &Box<T>)
- -> RelateResult<'tcx, Box<T>>
- where R: TypeRelation<'a,'tcx>
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for Box<T> {
+ fn relate<'a, 'gcx, R>(relation: &mut R,
+ a: &Box<T>,
+ b: &Box<T>)
+ -> RelateResult<'tcx, Box<T>>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
let a: &T = a;
let b: &T = b;
///////////////////////////////////////////////////////////////////////////
// Error handling
-pub fn expected_found<'a,'tcx:'a,R,T>(relation: &mut R,
- a: &T,
- b: &T)
- -> ExpectedFound<T>
- where R: TypeRelation<'a,'tcx>, T: Clone
+pub fn expected_found<'a, 'gcx, 'tcx, R, T>(relation: &mut R,
+ a: &T,
+ b: &T)
+ -> ExpectedFound<T>
+ where R: TypeRelation<'a, 'gcx, 'tcx>, T: Clone, 'gcx: 'a+'tcx, 'tcx: 'a
{
expected_found_bool(relation.a_is_expected(), a, b)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use infer::type_variable;
use ty::subst::{self, VecPerParamSpace};
-use traits;
use ty::{self, Lift, TraitRef, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
type Lifted = (A::Lifted, B::Lifted);
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b)))
}
}
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
+ type Lifted = Option<T::Lifted>;
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ Some(ref x) => tcx.lift(x).map(Some),
+ None => Some(None)
+ }
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
+ type Lifted = Result<T::Lifted, E::Lifted>;
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ Ok(ref x) => tcx.lift(x).map(Ok),
+ Err(ref e) => tcx.lift(e).map(Err)
+ }
+ }
+}
+
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] {
type Lifted = Vec<T::Lifted>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
// type annotation needed to inform `projection_must_outlive`
let mut result : Vec<<T as Lift<'tcx>>::Lifted>
= Vec::with_capacity(self.len());
}
}
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
+ type Lifted = Vec<T::Lifted>;
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self[..])
+ }
+}
+
impl<'tcx> Lift<'tcx> for ty::Region {
type Lifted = Self;
- fn lift_to_tcx(&self, _: &TyCtxt<'tcx>) -> Option<ty::Region> {
+ fn lift_to_tcx(&self, _: TyCtxt) -> Option<ty::Region> {
Some(*self)
}
}
impl<'a, 'tcx> Lift<'tcx> for TraitRef<'a> {
type Lifted = TraitRef<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<TraitRef<'tcx>> {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<TraitRef<'tcx>> {
tcx.lift(&self.substs).map(|substs| TraitRef {
def_id: self.def_id,
substs: substs
impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
type Lifted = ty::TraitPredicate<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
+ -> Option<ty::TraitPredicate<'tcx>> {
tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate {
trait_ref: trait_ref
})
impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> {
type Lifted = ty::EquatePredicate<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::EquatePredicate<'tcx>> {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
+ -> Option<ty::EquatePredicate<'tcx>> {
tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b))
}
}
impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
}
}
impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
type Lifted = ty::ProjectionPredicate<'tcx>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
+ -> Option<ty::ProjectionPredicate<'tcx>> {
tcx.lift(&(self.projection_ty.trait_ref, self.ty)).map(|(trait_ref, ty)| {
ty::ProjectionPredicate {
projection_ty: ty::ProjectionTy {
}
}
+impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> {
+ type Lifted = ty::Predicate<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ ty::Predicate::Trait(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::Trait)
+ }
+ ty::Predicate::Equate(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::Equate)
+ }
+ ty::Predicate::RegionOutlives(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::RegionOutlives)
+ }
+ ty::Predicate::TypeOutlives(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::TypeOutlives)
+ }
+ ty::Predicate::Projection(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::Projection)
+ }
+ ty::Predicate::WellFormed(ty) => {
+ tcx.lift(&ty).map(ty::Predicate::WellFormed)
+ }
+ ty::Predicate::Rfc1592(box ref a) => {
+ tcx.lift(a).map(|a| ty::Predicate::Rfc1592(Box::new(a)))
+ }
+ ty::Predicate::ClosureKind(closure_def_id, kind) => {
+ Some(ty::Predicate::ClosureKind(closure_def_id, kind))
+ }
+ ty::Predicate::ObjectSafe(trait_def_id) => {
+ Some(ty::Predicate::ObjectSafe(trait_def_id))
+ }
+ }
+ }
+}
+
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> {
type Lifted = ty::Binder<T::Lifted>;
- fn lift_to_tcx(&self, tcx: &TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
tcx.lift(&self.0).map(|x| ty::Binder(x))
}
}
+impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
+ type Lifted = ty::ClosureSubsts<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&(self.func_substs, self.upvar_tys)).map(|(substs, upvar_tys)| {
+ ty::ClosureSubsts {
+ func_substs: substs,
+ upvar_tys: upvar_tys
+ }
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ItemSubsts<'a> {
+ type Lifted = ty::ItemSubsts<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self.substs).map(|substs| {
+ ty::ItemSubsts {
+ substs: substs
+ }
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoRef<'a> {
+ type Lifted = ty::adjustment::AutoRef<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ ty::adjustment::AutoPtr(r, m) => {
+ tcx.lift(&r).map(|r| ty::adjustment::AutoPtr(r, m))
+ }
+ ty::adjustment::AutoUnsafe(m) => {
+ Some(ty::adjustment::AutoUnsafe(m))
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::FnOutput<'a> {
+ type Lifted = ty::FnOutput<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ match *self {
+ ty::FnConverging(ty) => {
+ tcx.lift(&ty).map(ty::FnConverging)
+ }
+ ty::FnDiverging => Some(ty::FnDiverging)
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
+ type Lifted = ty::FnSig<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self.inputs[..]).and_then(|inputs| {
+ tcx.lift(&self.output).map(|output| {
+ ty::FnSig {
+ inputs: inputs,
+ output: output,
+ variadic: self.variadic
+ }
+ })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ClosureTy<'a> {
+ type Lifted = ty::ClosureTy<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self.sig).map(|sig| {
+ ty::ClosureTy {
+ sig: sig,
+ unsafety: self.unsafety,
+ abi: self.abi
+ }
+ })
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
+ type Lifted = ty::error::ExpectedFound<T::Lifted>;
+ fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self.expected).and_then(|expected| {
+ tcx.lift(&self.found).map(|found| {
+ ty::error::ExpectedFound {
+ expected: expected,
+ found: found
+ }
+ })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for type_variable::Default<'a> {
+ type Lifted = type_variable::Default<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(&self.ty).map(|ty| {
+ type_variable::Default {
+ ty: ty,
+ origin_span: self.origin_span,
+ def_id: self.def_id
+ }
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
+ type Lifted = ty::error::TypeError<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
+ use ty::error::TypeError::*;
+
+ Some(match *self {
+ Mismatch => Mismatch,
+ UnsafetyMismatch(x) => UnsafetyMismatch(x),
+ AbiMismatch(x) => AbiMismatch(x),
+ Mutability => Mutability,
+ BoxMutability => BoxMutability,
+ PtrMutability => PtrMutability,
+ RefMutability => RefMutability,
+ VecMutability => VecMutability,
+ TupleSize(x) => TupleSize(x),
+ FixedArraySize(x) => FixedArraySize(x),
+ TyParamSize(x) => TyParamSize(x),
+ ArgCount => ArgCount,
+ RegionsDoesNotOutlive(a, b) => RegionsDoesNotOutlive(a, b),
+ RegionsNotSame(a, b) => RegionsNotSame(a, b),
+ RegionsNoOverlap(a, b) => RegionsNoOverlap(a, b),
+ RegionsInsufficientlyPolymorphic(a, b) => {
+ RegionsInsufficientlyPolymorphic(a, b)
+ }
+ RegionsOverlyPolymorphic(a, b) => RegionsOverlyPolymorphic(a, b),
+ IntegerAsChar => IntegerAsChar,
+ IntMismatch(x) => IntMismatch(x),
+ FloatMismatch(x) => FloatMismatch(x),
+ Traits(x) => Traits(x),
+ BuiltinBoundsMismatch(x) => BuiltinBoundsMismatch(x),
+ VariadicMismatch(x) => VariadicMismatch(x),
+ CyclicTy => CyclicTy,
+ ConvergenceMismatch(x) => ConvergenceMismatch(x),
+ ProjectionNameMismatched(x) => ProjectionNameMismatched(x),
+ ProjectionBoundsLength(x) => ProjectionBoundsLength(x),
+
+ Sorts(ref x) => return tcx.lift(x).map(Sorts),
+ TyParamDefaultMismatch(ref x) => {
+ return tcx.lift(x).map(TyParamDefaultMismatch)
+ }
+ })
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// TypeFoldable implementations.
//
($($ty:ty),+) => {
$(
impl<'tcx> TypeFoldable<'tcx> for $ty {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, _: &mut F) -> $ty {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> $ty {
*self
}
CopyImpls! { (), hir::Unsafety, abi::Abi }
impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> (T, U) {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> (T, U) {
(self.0.fold_with(folder), self.1.fold_with(folder))
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.as_ref().map(|t| t.fold_with(folder))
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
Rc::new((**self).fold_with(folder))
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let content: T = (**self).fold_with(folder);
box content
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::Binder(self.0.fold_with(folder))
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_binder(self)
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for P<[T]> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for VecPerParamSpace<T> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.map(|elem| elem.fold_with(folder))
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TraitTy<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TraitTy {
principal: self.principal.fold_with(folder),
bounds: self.bounds.fold_with(folder),
}
}
+impl<'tcx> TypeFoldable<'tcx> for &'tcx [Ty<'tcx>] {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ let tys = self.iter().map(|t| t.fold_with(folder)).collect();
+ folder.tcx().mk_type_list(tys)
+ }
+
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ self.iter().any(|t| t.visit_with(visitor))
+ }
+}
+
impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let sty = match self.sty {
ty::TyBox(typ) => ty::TyBox(typ.fold_with(folder)),
- ty::TyRawPtr(ref tm) => ty::TyRawPtr(tm.fold_with(folder)),
+ ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)),
ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz),
ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)),
- ty::TyEnum(tid, ref substs) => {
- let substs = substs.fold_with(folder);
- ty::TyEnum(tid, folder.tcx().mk_substs(substs))
- }
+ ty::TyEnum(tid, substs) => ty::TyEnum(tid, substs.fold_with(folder)),
ty::TyTrait(ref trait_ty) => ty::TyTrait(trait_ty.fold_with(folder)),
- ty::TyTuple(ref ts) => ty::TyTuple(ts.fold_with(folder)),
- ty::TyFnDef(def_id, substs, ref f) => {
- let substs = substs.fold_with(folder);
- let bfn = f.fold_with(folder);
+ ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)),
+ ty::TyFnDef(def_id, substs, f) => {
ty::TyFnDef(def_id,
- folder.tcx().mk_substs(substs),
- folder.tcx().mk_bare_fn(bfn))
- }
- ty::TyFnPtr(ref f) => {
- let bfn = f.fold_with(folder);
- ty::TyFnPtr(folder.tcx().mk_bare_fn(bfn))
- }
- ty::TyRef(r, ref tm) => {
- let r = r.fold_with(folder);
- ty::TyRef(folder.tcx().mk_region(r), tm.fold_with(folder))
- }
- ty::TyStruct(did, ref substs) => {
- let substs = substs.fold_with(folder);
- ty::TyStruct(did, folder.tcx().mk_substs(substs))
+ substs.fold_with(folder),
+ f.fold_with(folder))
}
- ty::TyClosure(did, ref substs) => {
- ty::TyClosure(did, substs.fold_with(folder))
+ ty::TyFnPtr(f) => ty::TyFnPtr(f.fold_with(folder)),
+ ty::TyRef(ref r, tm) => {
+ ty::TyRef(r.fold_with(folder), tm.fold_with(folder))
}
+ ty::TyStruct(did, substs) => ty::TyStruct(did, substs.fold_with(folder)),
+ ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)),
ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)),
ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
folder.tcx().mk_ty(sty)
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_ty(*self)
}
ty::TySlice(typ) => typ.visit_with(visitor),
ty::TyEnum(_tid, ref substs) => substs.visit_with(visitor),
ty::TyTrait(ref trait_ty) => trait_ty.visit_with(visitor),
- ty::TyTuple(ref ts) => ts.visit_with(visitor),
+ ty::TyTuple(ts) => ts.visit_with(visitor),
ty::TyFnDef(_, substs, ref f) => {
substs.visit_with(visitor) || f.visit_with(visitor)
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::BareFnTy<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
- ty::BareFnTy { sig: self.sig.fold_with(folder),
- abi: self.abi,
- unsafety: self.unsafety }
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::BareFnTy<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ let fty = ty::BareFnTy {
+ sig: self.sig.fold_with(folder),
+ abi: self.abi,
+ unsafety: self.unsafety
+ };
+ folder.tcx().mk_bare_fn(fty)
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_bare_fn_ty(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ClosureTy<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ClosureTy {
sig: self.sig.fold_with(folder),
unsafety: self.unsafety,
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_closure_ty(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl }
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_mt(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::FnOutput<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
ty::FnConverging(ref ty) => ty::FnConverging(ty.fold_with(folder)),
ty::FnDiverging => ty::FnDiverging
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_output(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::FnSig { inputs: self.inputs.fold_with(folder),
output: self.output.fold_with(folder),
variadic: self.variadic }
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_fn_sig(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
- let substs = self.substs.fold_with(folder);
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TraitRef {
def_id: self.def_id,
- substs: folder.tcx().mk_substs(substs),
+ substs: self.substs.fold_with(folder),
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_trait_ref(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ImplHeader<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ImplHeader {
impl_def_id: self.impl_def_id,
self_ty: self.self_ty.fold_with(folder),
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_impl_header(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::Region {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self {
*self
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_region(*self)
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for subst::Substs<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
- subst::Substs { regions: self.regions.fold_with(folder),
- types: self.types.fold_with(folder) }
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self {
+ *self
+ }
+
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ let region = folder.fold_region(**self);
+ folder.tcx().mk_region(region)
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
+ false
+ }
+
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ visitor.visit_region(**self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx subst::Substs<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ let substs = subst::Substs {
+ regions: self.regions.fold_with(folder),
+ types: self.types.fold_with(folder)
+ };
+ folder.tcx().mk_substs(substs)
+ }
+
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_substs(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
- let func_substs = self.func_substs.fold_with(folder);
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ClosureSubsts {
- func_substs: folder.tcx().mk_substs(func_substs),
+ func_substs: self.func_substs.fold_with(folder),
upvar_tys: self.upvar_tys.fold_with(folder),
}
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ItemSubsts {
substs: self.substs.fold_with(folder),
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoRef<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
- ty::adjustment::AutoPtr(r, m) => {
- let r = r.fold_with(folder);
- ty::adjustment::AutoPtr(folder.tcx().mk_region(r), m)
+ ty::adjustment::AutoPtr(ref r, m) => {
+ ty::adjustment::AutoPtr(r.fold_with(folder), m)
}
ty::adjustment::AutoUnsafe(m) => ty::adjustment::AutoUnsafe(m)
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_autoref(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self {
*self
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialBounds<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ExistentialBounds {
region_bound: self.region_bound.fold_with(folder),
builtin_bounds: self.builtin_bounds,
}
}
- fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
folder.fold_existential_bounds(self)
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TypeParameterDef {
name: self.name,
def_id: self.def_id,
}
impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
ty::ObjectLifetimeDefault::Ambiguous =>
ty::ObjectLifetimeDefault::Ambiguous,
}
impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::RegionParameterDef {
name: self.name,
def_id: self.def_id,
}
impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::Generics {
types: self.types.fold_with(folder),
regions: self.regions.fold_with(folder),
}
impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::GenericPredicates {
predicates: self.predicates.fold_with(folder),
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
ty::Predicate::Trait(ref a) =>
ty::Predicate::Trait(a.fold_with(folder)),
}
impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ProjectionPredicate {
projection_ty: self.projection_ty.fold_with(folder),
ty: self.ty.fold_with(folder),
}
impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ProjectionTy {
trait_ref: self.trait_ref.fold_with(folder),
item_name: self.item_name,
}
impl<'tcx> TypeFoldable<'tcx> for ty::InstantiatedPredicates<'tcx> {
- fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::InstantiatedPredicates {
predicates: self.predicates.fold_with(folder),
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::EquatePredicate(self.0.fold_with(folder),
self.1.fold_with(folder))
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TraitPredicate {
trait_ref: self.trait_ref.fold_with(folder)
}
where T : TypeFoldable<'tcx>,
U : TypeFoldable<'tcx>,
{
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::OutlivesPredicate(self.0.fold_with(folder),
self.1.fold_with(folder))
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ClosureUpvar {
def: self.def,
span: self.span,
}
}
-impl<'a, 'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'a, 'tcx> where 'tcx: 'a {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+impl<'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ParameterEnvironment {
- tcx: self.tcx,
free_substs: self.free_substs.fold_with(folder),
implicit_region_bound: self.implicit_region_bound.fold_with(folder),
caller_bounds: self.caller_bounds.fold_with(folder),
- selection_cache: traits::SelectionCache::new(),
- evaluation_cache: traits::EvaluationCache::new(),
free_id_outlive: self.free_id_outlive,
}
}
}
impl<'tcx> TypeFoldable<'tcx> for ty::TypeScheme<'tcx> {
- fn super_fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TypeScheme {
generics: self.generics.fold_with(folder),
ty: self.ty.fold_with(folder),
use hir::def_id::DefId;
use middle::region;
use ty::subst::{self, Substs};
-use traits;
use ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyCtxt, TyS, TypeFoldable};
use util::common::ErrorReported;
use syntax::ast::{self, Name};
use syntax::parse::token::keywords;
-use serialize::{Decodable, Decoder};
+use serialize::{Decodable, Decoder, Encodable, Encoder};
use hir;
/// Substs here, possibly against intuition, *may* contain `TyParam`s.
/// That is, even after substitution it is possible that there are type
/// variables. This happens when the `TyEnum` corresponds to an enum
- /// definition and not a concrete use of it. To get the correct `TyEnum`
- /// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
- /// the `ast_ty_to_ty_cache`. This is probably true for `TyStruct` as
- /// well.
+ /// definition and not a concrete use of it. This is true for `TyStruct`
+ /// as well.
TyEnum(AdtDef<'tcx>, &'tcx Substs<'tcx>),
/// A structure type, defined with `struct`.
/// The anonymous type of a closure. Used to represent the type of
/// `|a| a`.
- TyClosure(DefId, Box<ClosureSubsts<'tcx>>),
+ TyClosure(DefId, ClosureSubsts<'tcx>),
/// A tuple type. For example, `(i32, bool)`.
- TyTuple(Vec<Ty<'tcx>>),
+ TyTuple(&'tcx [Ty<'tcx>]),
/// The projection of an associated type. For example,
/// `<T as Trait<..>>::N`.
/// closure C wind up influencing the decisions we ought to make for
/// closure C (which would then require fixed point iteration to
/// handle). Plus it fixes an ICE. :P
-#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ClosureSubsts<'tcx> {
/// Lifetime and type parameters from the enclosing function.
/// These are separated out because trans wants to pass them around
/// The types of the upvars. The list parallels the freevars and
/// `upvar_borrows` lists. These are kept distinct so that we can
/// easily index into them.
- pub upvar_tys: Vec<Ty<'tcx>>
+ pub upvar_tys: &'tcx [Ty<'tcx>]
}
-impl<'tcx> Decodable for &'tcx ClosureSubsts<'tcx> {
- fn decode<S: Decoder>(s: &mut S) -> Result<&'tcx ClosureSubsts<'tcx>, S::Error> {
- let closure_substs = Decodable::decode(s)?;
- let dummy_def_id: DefId = unsafe { mem::zeroed() };
-
- cstore::tls::with_decoding_context(s, |dcx, _| {
- // Intern the value
- let ty = dcx.tcx().mk_closure_from_closure_substs(dummy_def_id,
- Box::new(closure_substs));
- match ty.sty {
- TyClosure(_, ref closure_substs) => Ok(&**closure_substs),
- _ => bug!()
- }
+impl<'tcx> Encodable for ClosureSubsts<'tcx> {
+ fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+ (self.func_substs, self.upvar_tys).encode(s)
+ }
+}
+
+impl<'tcx> Decodable for ClosureSubsts<'tcx> {
+ fn decode<D: Decoder>(d: &mut D) -> Result<ClosureSubsts<'tcx>, D::Error> {
+ let (func_substs, upvar_tys) = Decodable::decode(d)?;
+ cstore::tls::with_decoding_context(d, |dcx, _| {
+ Ok(ClosureSubsts {
+ func_substs: func_substs,
+ upvar_tys: dcx.tcx().mk_type_list(upvar_tys)
+ })
})
}
}
pub bounds: ExistentialBounds<'tcx>,
}
-impl<'tcx> TraitTy<'tcx> {
+impl<'a, 'gcx, 'tcx> TraitTy<'tcx> {
pub fn principal_def_id(&self) -> DefId {
self.principal.0.def_id
}
/// we convert the principal trait-ref into a normal trait-ref,
/// you must give *some* self-type. A common choice is `mk_err()`
/// or some skolemized type.
- pub fn principal_trait_ref_with_self_ty(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn principal_trait_ref_with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
self_ty: Ty<'tcx>)
-> ty::PolyTraitRef<'tcx>
{
})
}
- pub fn projection_bounds_with_self_ty(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn projection_bounds_with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
self_ty: Ty<'tcx>)
-> Vec<ty::PolyProjectionPredicate<'tcx>>
{
pub name: Name,
}
-impl ParamTy {
+impl<'a, 'gcx, 'tcx> ParamTy {
pub fn new(space: subst::ParamSpace,
index: u32,
name: Name)
ParamTy::new(def.space, def.index, def.name)
}
- pub fn to_ty<'tcx>(self, tcx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
tcx.mk_param(self.space, self.idx, self.name)
}
/// to be used. These also support explicit bounds: both the internally-stored
/// *scope*, which the region is assumed to outlive, as well as other
/// relations stored in the `FreeRegionMap`. Note that these relations
-/// aren't checked when you `make_subregion` (or `mk_eqty`), only by
+/// aren't checked when you `make_subregion` (or `eq_types`), only by
/// `resolve_regions_and_report_errors`.
///
/// When working with higher-ranked types, some region relations aren't
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct BuiltinBounds(EnumSet<BuiltinBound>);
-impl BuiltinBounds {
+impl<'a, 'gcx, 'tcx> BuiltinBounds {
pub fn empty() -> BuiltinBounds {
BuiltinBounds(EnumSet::new())
}
self.into_iter()
}
- pub fn to_predicates<'tcx>(&self,
- tcx: &TyCtxt<'tcx>,
- self_ty: Ty<'tcx>) -> Vec<ty::Predicate<'tcx>> {
+ pub fn to_predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ self_ty: Ty<'tcx>)
+ -> Vec<ty::Predicate<'tcx>> {
self.iter().filter_map(|builtin_bound|
- match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, self_ty) {
+ match tcx.trait_ref_for_builtin_bound(builtin_bound, self_ty) {
Ok(trait_ref) => Some(trait_ref.to_predicate()),
Err(ErrorReported) => { None }
}
}
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn try_add_builtin_trait(&self,
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn try_add_builtin_trait(self,
trait_def_id: DefId,
builtin_bounds: &mut EnumSet<BuiltinBound>)
-> bool
}
// Type utilities
-impl<'tcx> TyS<'tcx> {
+impl<'a, 'gcx, 'tcx> TyS<'tcx> {
pub fn as_opt_param_ty(&self) -> Option<ty::ParamTy> {
match self.sty {
ty::TyParam(ref d) => Some(d.clone()),
}
}
- pub fn is_empty(&self, _cx: &TyCtxt) -> bool {
+ pub fn is_empty(&self, _cx: TyCtxt) -> bool {
// FIXME(#24885): be smarter here
match self.sty {
TyEnum(def, _) | TyStruct(def, _) => def.is_empty(),
}
}
- pub fn sequence_element_type(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
TyArray(ty, _) | TySlice(ty) => ty,
- TyStr => cx.mk_mach_uint(ast::UintTy::U8),
+ TyStr => tcx.mk_mach_uint(ast::UintTy::U8),
_ => bug!("sequence_element_type called on non-sequence value: {}", self),
}
}
- pub fn simd_type(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
TyStruct(def, substs) => {
- def.struct_variant().fields[0].ty(cx, substs)
+ def.struct_variant().fields[0].ty(tcx, substs)
}
_ => bug!("simd_type called on invalid type")
}
}
- pub fn simd_size(&self, _cx: &TyCtxt) -> usize {
+ pub fn simd_size(&self, _cx: TyCtxt) -> usize {
match self.sty {
TyStruct(def, _) => def.struct_variant().fields.len(),
_ => bug!("simd_size called on invalid type")
pub regions: VecPerParamSpace<ty::Region>,
}
-impl<'tcx> Substs<'tcx> {
+impl<'a, 'gcx, 'tcx> Substs<'tcx> {
pub fn new(t: VecPerParamSpace<Ty<'tcx>>,
r: VecPerParamSpace<ty::Region>)
-> Substs<'tcx>
Substs { types: types, regions: regions }
}
- pub fn with_method_from_subst(self, other: &Substs<'tcx>) -> Substs<'tcx> {
- let Substs { types, regions } = self;
+ pub fn with_method_from_subst(&self, other: &Substs<'tcx>) -> Substs<'tcx> {
+ let Substs { types, regions } = self.clone();
let types = types.with_slice(FnSpace, other.types.get_slice(FnSpace));
let regions = regions.with_slice(FnSpace, other.regions.get_slice(FnSpace));
Substs { types: types, regions: regions }
}
/// Creates a trait-ref out of this substs, ignoring the FnSpace substs
- pub fn to_trait_ref(&self, tcx: &TyCtxt<'tcx>, trait_id: DefId)
+ pub fn to_trait_ref(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_id: DefId)
-> ty::TraitRef<'tcx> {
let Substs { mut types, mut regions } = self.clone();
types.truncate(FnSpace, 0);
// there is more information available (for better errors).
pub trait Subst<'tcx> : Sized {
- fn subst(&self, tcx: &TyCtxt<'tcx>, substs: &Substs<'tcx>) -> Self {
+ fn subst<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ substs: &Substs<'tcx>) -> Self {
self.subst_spanned(tcx, substs, None)
}
- fn subst_spanned(&self, tcx: &TyCtxt<'tcx>,
- substs: &Substs<'tcx>,
- span: Option<Span>)
- -> Self;
+ fn subst_spanned<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ substs: &Substs<'tcx>,
+ span: Option<Span>)
+ -> Self;
}
impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
- fn subst_spanned(&self,
- tcx: &TyCtxt<'tcx>,
- substs: &Substs<'tcx>,
- span: Option<Span>)
- -> T
+ fn subst_spanned<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ substs: &Substs<'tcx>,
+ span: Option<Span>)
+ -> T
{
let mut folder = SubstFolder { tcx: tcx,
substs: substs,
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
-struct SubstFolder<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+struct SubstFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
substs: &'a Substs<'tcx>,
// The location for which the substitution is performed, if available.
region_binders_passed: u32,
}
-impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.tcx }
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
self.region_binders_passed += 1;
}
}
-impl<'a,'tcx> SubstFolder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize);
pub flags: Cell<TraitFlags>
}
-impl<'tcx> TraitDef<'tcx> {
+impl<'a, 'gcx, 'tcx> TraitDef<'tcx> {
pub fn new(unsafety: hir::Unsafety,
paren_sugar: bool,
generics: ty::Generics<'tcx>,
);
}
- fn write_trait_impls(&self, tcx: &TyCtxt<'tcx>) {
+ fn write_trait_impls(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) {
tcx.dep_graph.write(DepNode::TraitImpls(self.trait_ref.def_id));
}
- fn read_trait_impls(&self, tcx: &TyCtxt<'tcx>) {
+ fn read_trait_impls(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) {
tcx.dep_graph.read(DepNode::TraitImpls(self.trait_ref.def_id));
}
/// Records a basic trait-to-implementation mapping.
///
/// Returns `true` iff the impl has not previously been recorded.
- fn record_impl(&self,
- tcx: &TyCtxt<'tcx>,
+ fn record_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId,
impl_trait_ref: TraitRef<'tcx>)
-> bool {
}
/// Records a trait-to-implementation mapping for a crate-local impl.
- pub fn record_local_impl(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn record_local_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId,
impl_trait_ref: TraitRef<'tcx>) {
assert!(impl_def_id.is_local());
/// The `parent_impl` is the immediately-less-specialized impl, or the
/// trait's def ID if the impl is not a specialization -- information that
/// should be pulled from the metadata.
- pub fn record_remote_impl(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn record_remote_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId,
impl_trait_ref: TraitRef<'tcx>,
parent_impl: DefId) {
/// Adds a local impl into the specialization graph, returning an error with
/// overlap information if the impl overlaps but does not specialize an
/// existing impl.
- pub fn add_impl_for_specialization<'a>(&self,
- tcx: &'a TyCtxt<'tcx>,
- impl_def_id: DefId)
- -> Result<(), traits::Overlap<'a, 'tcx>> {
+ pub fn add_impl_for_specialization(&self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId)
+ -> Result<(), traits::OverlapError> {
assert!(impl_def_id.is_local());
self.specialization_graph.borrow_mut()
.insert(tcx, impl_def_id)
}
- pub fn ancestors<'a>(&'a self, of_impl: DefId) -> specialization_graph::Ancestors<'a, 'tcx> {
+ pub fn ancestors(&'a self, of_impl: DefId) -> specialization_graph::Ancestors<'a, 'tcx> {
specialization_graph::ancestors(self, of_impl)
}
- pub fn for_each_impl<F: FnMut(DefId)>(&self, tcx: &TyCtxt<'tcx>, mut f: F) {
- self.read_trait_impls(tcx);
+ pub fn for_each_impl<F: FnMut(DefId)>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, mut f: F) {
+ self.read_trait_impls(tcx);
tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id);
for &impl_def_id in self.blanket_impls.borrow().iter() {
/// Iterate over every impl that could possibly match the
/// self-type `self_ty`.
pub fn for_each_relevant_impl<F: FnMut(DefId)>(&self,
- tcx: &TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
self_ty: Ty<'tcx>,
mut f: F)
{
use hir::svh::Svh;
use hir::def_id::DefId;
use ty::subst;
-use infer;
+use infer::InferCtxt;
use hir::pat_util;
use traits::{self, ProjectionMode};
use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable};
use hir;
pub trait IntTypeExt {
- fn to_ty<'tcx>(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx>;
- fn disr_incr(&self, val: Disr) -> Option<Disr>;
+ fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
+ fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option<Disr>)
+ -> Option<Disr>;
fn assert_ty_matches(&self, val: Disr);
- fn initial_discriminant(&self, tcx: &TyCtxt) -> Disr;
+ fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr;
}
impl IntTypeExt for attr::IntType {
- fn to_ty<'tcx>(&self, cx: &TyCtxt<'tcx>) -> Ty<'tcx> {
+ fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
match *self {
- SignedInt(ast::IntTy::I8) => cx.types.i8,
- SignedInt(ast::IntTy::I16) => cx.types.i16,
- SignedInt(ast::IntTy::I32) => cx.types.i32,
- SignedInt(ast::IntTy::I64) => cx.types.i64,
- SignedInt(ast::IntTy::Is) => cx.types.isize,
- UnsignedInt(ast::UintTy::U8) => cx.types.u8,
- UnsignedInt(ast::UintTy::U16) => cx.types.u16,
- UnsignedInt(ast::UintTy::U32) => cx.types.u32,
- UnsignedInt(ast::UintTy::U64) => cx.types.u64,
- UnsignedInt(ast::UintTy::Us) => cx.types.usize,
+ SignedInt(ast::IntTy::I8) => tcx.types.i8,
+ SignedInt(ast::IntTy::I16) => tcx.types.i16,
+ SignedInt(ast::IntTy::I32) => tcx.types.i32,
+ SignedInt(ast::IntTy::I64) => tcx.types.i64,
+ SignedInt(ast::IntTy::Is) => tcx.types.isize,
+ UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
+ UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
+ UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
+ UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
+ UnsignedInt(ast::UintTy::Us) => tcx.types.usize,
}
}
- fn initial_discriminant(&self, tcx: &TyCtxt) -> Disr {
+ fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr {
match *self {
SignedInt(ast::IntTy::I8) => ConstInt::I8(0),
SignedInt(ast::IntTy::I16) => ConstInt::I16(0),
}
}
- fn disr_incr(&self, val: Disr) -> Option<Disr> {
- self.assert_ty_matches(val);
- (val + ConstInt::Infer(1)).ok()
+ fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option<Disr>)
+ -> Option<Disr> {
+ if let Some(val) = val {
+ self.assert_ty_matches(val);
+ (val + ConstInt::Infer(1)).ok()
+ } else {
+ Some(self.initial_discriminant(tcx))
+ }
}
}
SelfRecursive,
}
-impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> {
- pub fn can_type_implement_copy(&self, self_type: Ty<'tcx>, span: Span)
- -> Result<(),CopyImplementationError> {
- let tcx = self.tcx;
-
+impl<'tcx> ParameterEnvironment<'tcx> {
+ pub fn can_type_implement_copy<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ self_type: Ty<'tcx>, span: Span)
+ -> Result<(),CopyImplementationError> {
// FIXME: (@jroesch) float this code up
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(self.clone()),
- ProjectionMode::Topmost);
-
- let adt = match self_type.sty {
- ty::TyStruct(struct_def, substs) => {
- for field in struct_def.all_fields() {
- let field_ty = field.ty(tcx, substs);
- if infcx.type_moves_by_default(field_ty, span) {
- return Err(CopyImplementationError::InfrigingField(
- field.name))
- }
- }
- struct_def
- }
- ty::TyEnum(enum_def, substs) => {
- for variant in &enum_def.variants {
- for field in &variant.fields {
+ tcx.infer_ctxt(None, Some(self.clone()),
+ ProjectionMode::Topmost).enter(|infcx| {
+ let adt = match self_type.sty {
+ ty::TyStruct(struct_def, substs) => {
+ for field in struct_def.all_fields() {
let field_ty = field.ty(tcx, substs);
if infcx.type_moves_by_default(field_ty, span) {
- return Err(CopyImplementationError::InfrigingVariant(
- variant.name))
+ return Err(CopyImplementationError::InfrigingField(
+ field.name))
}
}
+ struct_def
}
- enum_def
- }
- _ => return Err(CopyImplementationError::NotAnAdt),
- };
+ ty::TyEnum(enum_def, substs) => {
+ for variant in &enum_def.variants {
+ for field in &variant.fields {
+ let field_ty = field.ty(tcx, substs);
+ if infcx.type_moves_by_default(field_ty, span) {
+ return Err(CopyImplementationError::InfrigingVariant(
+ variant.name))
+ }
+ }
+ }
+ enum_def
+ }
+ _ => return Err(CopyImplementationError::NotAnAdt)
+ };
- if adt.has_dtor() {
- return Err(CopyImplementationError::HasDestructor)
- }
+ if adt.has_dtor() {
+ return Err(CopyImplementationError::HasDestructor);
+ }
- Ok(())
+ Ok(())
+ })
}
}
-impl<'tcx> TyCtxt<'tcx> {
- pub fn pat_contains_ref_binding(&self, pat: &hir::Pat) -> Option<hir::Mutability> {
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn pat_contains_ref_binding(self, pat: &hir::Pat) -> Option<hir::Mutability> {
pat_util::pat_contains_ref_binding(&self.def_map, pat)
}
- pub fn arm_contains_ref_binding(&self, arm: &hir::Arm) -> Option<hir::Mutability> {
+ pub fn arm_contains_ref_binding(self, arm: &hir::Arm) -> Option<hir::Mutability> {
pat_util::arm_contains_ref_binding(&self.def_map, arm)
}
/// Returns the type of element at index `i` in tuple or tuple-like type `t`.
/// For an enum `t`, `variant` is None only if `t` is a univariant enum.
- pub fn positional_element_ty(&self,
+ pub fn positional_element_ty(self,
ty: Ty<'tcx>,
i: usize,
variant: Option<DefId>) -> Option<Ty<'tcx>> {
/// Returns the type of element at field `n` in struct or struct-like type `t`.
/// For an enum `t`, `variant` must be some def id.
- pub fn named_element_ty(&self,
+ pub fn named_element_ty(self,
ty: Ty<'tcx>,
n: Name,
variant: Option<DefId>) -> Option<Ty<'tcx>> {
/// Returns the IntType representation.
/// This used to ensure `int_ty` doesn't contain `usize` and `isize`
/// by converting them to their actual types. That doesn't happen anymore.
- pub fn enum_repr_type(&self, opt_hint: Option<&attr::ReprAttr>) -> attr::IntType {
+ pub fn enum_repr_type(self, opt_hint: Option<&attr::ReprAttr>) -> attr::IntType {
match opt_hint {
// Feed in the given type
Some(&attr::ReprInt(_, int_t)) => int_t,
/// Returns the deeply last field of nested structures, or the same type,
/// if not a structure at all. Corresponds to the only possible unsized
/// field, and its type can be used to determine unsizing strategy.
- pub fn struct_tail(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
+ pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
while let TyStruct(def, substs) = ty.sty {
match def.struct_variant().fields.last() {
Some(f) => ty = f.ty(self, substs),
/// structure definitions.
/// For `(Foo<Foo<T>>, Foo<Trait>)`, the result will be `(Foo<T>, Trait)`,
/// whereas struct_tail produces `T`, and `Trait`, respectively.
- pub fn struct_lockstep_tails(&self,
+ pub fn struct_lockstep_tails(self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> (Ty<'tcx>, Ty<'tcx>) {
///
/// Requires that trait definitions have been processed so that we can
/// elaborate predicates and walk supertraits.
- pub fn required_region_bounds(&self,
+ pub fn required_region_bounds(self,
erased_self_ty: Ty<'tcx>,
predicates: Vec<ty::Predicate<'tcx>>)
-> Vec<ty::Region> {
/// Creates a hash of the type `Ty` which will be the same no matter what crate
/// context it's calculated within. This is used by the `type_id` intrinsic.
- pub fn hash_crate_independent(&self, ty: Ty<'tcx>, svh: &Svh) -> u64 {
+ pub fn hash_crate_independent(self, ty: Ty<'tcx>, svh: &Svh) -> u64 {
let mut state = SipHasher::new();
helper(self, ty, svh, &mut state);
return state.finish();
- fn helper<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, svh: &Svh,
- state: &mut SipHasher) {
+ fn helper<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'tcx>, svh: &Svh,
+ state: &mut SipHasher) {
macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } }
macro_rules! hash { ($e:expr) => { $e.hash(state) } }
/// `adt` that do not strictly outlive the adt value itself.
/// (This allows programs to make cyclic structures without
/// resorting to unsafe means; see RFCs 769 and 1238).
- pub fn is_adt_dtorck(&self, adt: ty::AdtDef<'tcx>) -> bool {
+ pub fn is_adt_dtorck(self, adt: ty::AdtDef) -> bool {
let dtor_method = match adt.destructor() {
Some(dtor) => dtor,
None => return false
}
}
-impl<'tcx> ty::TyS<'tcx> {
- fn impls_bound<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
- bound: ty::BuiltinBound,
- span: Span)
- -> bool
+impl<'a, 'tcx> ty::TyS<'tcx> {
+ fn impls_bound(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ bound: ty::BuiltinBound, span: Span) -> bool
{
- let tcx = param_env.tcx;
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(param_env.clone()),
- ProjectionMode::Topmost);
-
- let is_impld = traits::type_known_to_meet_builtin_bound(&infcx,
- self, bound, span);
-
- debug!("Ty::impls_bound({:?}, {:?}) = {:?}",
- self, bound, is_impld);
-
- is_impld
+ tcx.infer_ctxt(None, Some(param_env.clone()), ProjectionMode::Topmost).enter(|infcx| {
+ traits::type_known_to_meet_builtin_bound(&infcx, self, bound, span)
+ })
}
// FIXME (@jroesch): I made this public to use it, not sure if should be private
- pub fn moves_by_default<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
- span: Span) -> bool {
+ pub fn moves_by_default(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool {
if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) {
return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT);
}
TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) |
TyClosure(..) | TyEnum(..) | TyStruct(..) |
TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None
- }.unwrap_or_else(|| !self.impls_bound(param_env, ty::BoundCopy, span));
+ }.unwrap_or_else(|| !self.impls_bound(tcx, param_env, ty::BoundCopy, span));
if !self.has_param_types() && !self.has_self_ty() {
self.flags.set(self.flags.get() | if result {
}
#[inline]
- pub fn is_sized<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
- span: Span) -> bool
+ pub fn is_sized(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool
{
if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) {
return self.flags.get().intersects(TypeFlags::IS_SIZED);
}
- self.is_sized_uncached(param_env, span)
+ self.is_sized_uncached(tcx, param_env, span)
}
- fn is_sized_uncached<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>,
- span: Span) -> bool {
+ fn is_sized_uncached(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool {
assert!(!self.needs_infer());
// Fast-path for primitive types
TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) |
TyInfer(..) | TyError => None
- }.unwrap_or_else(|| self.impls_bound(param_env, ty::BoundSized, span));
+ }.unwrap_or_else(|| self.impls_bound(tcx, param_env, ty::BoundSized, span));
if !self.has_param_types() && !self.has_self_ty() {
self.flags.set(self.flags.get() | if result {
}
#[inline]
- pub fn layout<'a>(&'tcx self, infcx: &infer::InferCtxt<'a, 'tcx>)
- -> Result<&'tcx Layout, LayoutError<'tcx>> {
+ pub fn layout<'lcx>(&'tcx self, infcx: &InferCtxt<'a, 'tcx, 'lcx>)
+ -> Result<&'tcx Layout, LayoutError<'tcx>> {
+ let tcx = infcx.tcx.global_tcx();
let can_cache = !self.has_param_types() && !self.has_self_ty();
if can_cache {
- if let Some(&cached) = infcx.tcx.layout_cache.borrow().get(&self) {
+ if let Some(&cached) = tcx.layout_cache.borrow().get(&self) {
return Ok(cached);
}
}
let layout = Layout::compute_uncached(self, infcx)?;
- let layout = infcx.tcx.intern_layout(layout);
+ let layout = tcx.intern_layout(layout);
if can_cache {
- infcx.tcx.layout_cache.borrow_mut().insert(self, layout);
+ tcx.layout_cache.borrow_mut().insert(self, layout);
}
Ok(layout)
}
/// Check whether a type is representable. This means it cannot contain unboxed
/// structural recursion. This check is needed for structs and enums.
- pub fn is_representable(&'tcx self, cx: &TyCtxt<'tcx>, sp: Span) -> Representability {
+ pub fn is_representable(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span)
+ -> Representability {
// Iterate until something non-representable is found
- fn find_nonrepresentable<'tcx, It: Iterator<Item=Ty<'tcx>>>(cx: &TyCtxt<'tcx>,
- sp: Span,
- seen: &mut Vec<Ty<'tcx>>,
- iter: It)
- -> Representability {
+ fn find_nonrepresentable<'a, 'tcx, It>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ sp: Span,
+ seen: &mut Vec<Ty<'tcx>>,
+ iter: It)
+ -> Representability
+ where It: Iterator<Item=Ty<'tcx>> {
iter.fold(Representability::Representable,
- |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty)))
+ |r, ty| cmp::max(r, is_type_structurally_recursive(tcx, sp, seen, ty)))
}
- fn are_inner_types_recursive<'tcx>(cx: &TyCtxt<'tcx>, sp: Span,
- seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
- -> Representability {
+ fn are_inner_types_recursive<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span,
+ seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
+ -> Representability {
match ty.sty {
TyTuple(ref ts) => {
- find_nonrepresentable(cx, sp, seen, ts.iter().cloned())
+ find_nonrepresentable(tcx, sp, seen, ts.iter().cloned())
}
// Fixed-length vectors.
// FIXME(#11924) Behavior undecided for zero-length vectors.
TyArray(ty, _) => {
- is_type_structurally_recursive(cx, sp, seen, ty)
+ is_type_structurally_recursive(tcx, sp, seen, ty)
}
TyStruct(def, substs) | TyEnum(def, substs) => {
- find_nonrepresentable(cx,
+ find_nonrepresentable(tcx,
sp,
seen,
- def.all_fields().map(|f| f.ty(cx, substs)))
+ def.all_fields().map(|f| f.ty(tcx, substs)))
}
TyClosure(..) => {
// this check is run on type definitions, so we don't expect
// Does the type `ty` directly (without indirection through a pointer)
// contain any types on stack `seen`?
- fn is_type_structurally_recursive<'tcx>(cx: &TyCtxt<'tcx>,
- sp: Span,
- seen: &mut Vec<Ty<'tcx>>,
- ty: Ty<'tcx>) -> Representability {
+ fn is_type_structurally_recursive<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ sp: Span,
+ seen: &mut Vec<Ty<'tcx>>,
+ ty: Ty<'tcx>) -> Representability {
debug!("is_type_structurally_recursive: {:?}", ty);
match ty.sty {
// For structs and enums, track all previously seen types by pushing them
// onto the 'seen' stack.
seen.push(ty);
- let out = are_inner_types_recursive(cx, sp, seen, ty);
+ let out = are_inner_types_recursive(tcx, sp, seen, ty);
seen.pop();
out
}
_ => {
// No need to push in other cases.
- are_inner_types_recursive(cx, sp, seen, ty)
+ are_inner_types_recursive(tcx, sp, seen, ty)
}
}
}
// contains a different, structurally recursive type, maintain a stack
// of seen types and check recursion for each of them (issues #3008, #3779).
let mut seen: Vec<Ty> = Vec::new();
- let r = is_type_structurally_recursive(cx, sp, &mut seen, self);
+ let r = is_type_structurally_recursive(tcx, sp, &mut seen, self);
debug!("is_type_representable: {:?} is {:?}", self, r);
r
}
use hir::def_id::DefId;
use infer::InferCtxt;
-use ty::outlives::{self, Component};
+use ty::outlives::Component;
use ty::subst::Substs;
use traits;
use ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
/// inference variable, returns `None`, because we are not able to
/// make any progress at all. This is to prevent "livelock" where we
/// say "$0 is WF if $0 is WF".
-pub fn obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- body_id: ast::NodeId,
- ty: Ty<'tcx>,
- span: Span)
- -> Option<Vec<traits::PredicateObligation<'tcx>>>
+pub fn obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ body_id: ast::NodeId,
+ ty: Ty<'tcx>,
+ span: Span)
+ -> Option<Vec<traits::PredicateObligation<'tcx>>>
{
let mut wf = WfPredicates { infcx: infcx,
body_id: body_id,
/// well-formed. For example, if there is a trait `Set` defined like
/// `trait Set<K:Eq>`, then the trait reference `Foo: Set<Bar>` is WF
/// if `Bar: Eq`.
-pub fn trait_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- body_id: ast::NodeId,
- trait_ref: &ty::TraitRef<'tcx>,
- span: Span)
- -> Vec<traits::PredicateObligation<'tcx>>
+pub fn trait_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ body_id: ast::NodeId,
+ trait_ref: &ty::TraitRef<'tcx>,
+ span: Span)
+ -> Vec<traits::PredicateObligation<'tcx>>
{
let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
wf.compute_trait_ref(trait_ref);
wf.normalize()
}
-pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- body_id: ast::NodeId,
- predicate: &ty::Predicate<'tcx>,
- span: Span)
- -> Vec<traits::PredicateObligation<'tcx>>
+pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ body_id: ast::NodeId,
+ predicate: &ty::Predicate<'tcx>,
+ span: Span)
+ -> Vec<traits::PredicateObligation<'tcx>>
{
let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] };
/// Compute the implied bounds that a callee/impl can assume based on
/// the fact that caller/projector has ensured that `ty` is WF. See
/// the `ImpliedBound` type for more details.
-pub fn implied_bounds<'a,'tcx>(
- infcx: &'a InferCtxt<'a,'tcx>,
+pub fn implied_bounds<'a, 'gcx, 'tcx>(
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
body_id: ast::NodeId,
ty: Ty<'tcx>,
span: Span)
match infcx.tcx.no_late_bound_regions(data) {
None => vec![],
Some(ty::OutlivesPredicate(ty_a, r_b)) => {
- let components = outlives::components(infcx, ty_a);
+ let components = infcx.outlives_components(ty_a);
implied_bounds_from_components(r_b, components)
}
},
.collect()
}
-struct WfPredicates<'a,'tcx:'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+struct WfPredicates<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
body_id: ast::NodeId,
span: Span,
out: Vec<traits::PredicateObligation<'tcx>>,
}
-impl<'a,'tcx> WfPredicates<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> {
fn cause(&mut self, code: traits::ObligationCauseCode<'tcx>) -> traits::ObligationCause<'tcx> {
traits::ObligationCause::new(self.span, self.body_id, code)
}
rfc1592: bool) {
if !subty.has_escaping_regions() {
let cause = self.cause(cause);
- match traits::trait_ref_for_builtin_bound(self.infcx.tcx,
- ty::BoundSized,
- subty) {
+ match self.infcx.tcx.trait_ref_for_builtin_bound(ty::BoundSized, subty) {
Ok(trait_ref) => {
let predicate = trait_ref.to_predicate();
let predicate = if rfc1592 {
/// they declare `trait SomeTrait : 'static`, for example, then
/// `'static` would appear in the list. The hard work is done by
/// `ty::required_region_bounds`, see that for more information.
-pub fn object_region_bounds<'tcx>(
- tcx: &TyCtxt<'tcx>,
+pub fn object_region_bounds<'a, 'gcx, 'tcx>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
principal: &ty::PolyTraitRef<'tcx>,
others: ty::BuiltinBounds)
-> Vec<ty::Region>
use ty::TyClosure;
use ty::{TyBox, TyTrait, TyInt, TyUint, TyInfer};
use ty::{self, Ty, TyCtxt, TypeFoldable};
+use ty::fold::{TypeFolder, TypeVisitor};
use std::cell::Cell;
use std::fmt;
Value
}
-fn number_of_supplied_defaults<'tcx, GG>(tcx: &ty::TyCtxt<'tcx>,
- substs: &subst::Substs,
- space: subst::ParamSpace,
- get_generics: GG)
- -> usize
- where GG: FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx>
+fn number_of_supplied_defaults<'a, 'gcx, 'tcx, GG>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ substs: &subst::Substs,
+ space: subst::ParamSpace,
+ get_generics: GG)
+ -> usize
+ where GG: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx>
{
let generics = get_generics(tcx);
projections: &[ty::ProjectionPredicate],
get_generics: GG)
-> fmt::Result
- where GG: for<'tcx> FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx>
+ where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx>
{
if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) {
write!(f, "<{} as ", self_ty)?;
Ok(())
}
-fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter,
- tcx: &TyCtxt<'tcx>,
- original: &ty::Binder<T>,
- lifted: Option<ty::Binder<U>>) -> fmt::Result
+fn in_binder<'a, 'gcx, 'tcx, T, U>(f: &mut fmt::Formatter,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ original: &ty::Binder<T>,
+ lifted: Option<ty::Binder<U>>) -> fmt::Result
where T: fmt::Display, U: fmt::Display + TypeFoldable<'tcx>
{
// Replace any anonymous late-bound regions with named
struct TraitAndProjections<'tcx>(ty::TraitRef<'tcx>, Vec<ty::ProjectionPredicate<'tcx>>);
impl<'tcx> TypeFoldable<'tcx> for TraitAndProjections<'tcx> {
- fn super_fold_with<F:ty::fold::TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
TraitAndProjections(self.0.fold_with(folder), self.1.fold_with(folder))
}
- fn super_visit_with<V: ty::fold::TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.0.visit_with(visitor) || self.1.visit_with(visitor)
}
}
}
}
-impl<'a, 'tcx> fmt::Debug for ty::ParameterEnvironment<'a, 'tcx> {
+impl<'tcx> fmt::Debug for ty::ParameterEnvironment<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ParameterEnvironment(\
free_substs={:?}, \
TyTrait(ref data) => write!(f, "{}", data),
ty::TyProjection(ref data) => write!(f, "{}", data),
TyStr => write!(f, "str"),
- TyClosure(did, ref substs) => ty::tls::with(|tcx| {
+ TyClosure(did, substs) => ty::tls::with(|tcx| {
write!(f, "[closure")?;
if let Some(node_id) = tcx.map.as_local_node_id(did) {
write!(f, "@{:?}", tcx.map.span(node_id))?;
let mut sep = " ";
tcx.with_freevars(node_id, |freevars| {
- for (freevar, upvar_ty) in freevars.iter().zip(&substs.upvar_tys) {
+ for (freevar, upvar_ty) in freevars.iter().zip(substs.upvar_tys) {
let node_id = freevar.def.var_id();
write!(f,
"{}{}:{}",
use borrowck::InteriorKind::{InteriorElement, InteriorField};
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::expr_use_visitor::MutateMode;
-use rustc::infer;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
use rustc::middle::region;
use rustc::ty::{self, TyCtxt};
-use rustc::traits::ProjectionMode;
use syntax::ast;
use syntax::codemap::Span;
use rustc::hir;
dfcx_loans: &'a LoanDataFlow<'a, 'tcx>,
move_data: &'a move_data::FlowedMoveData<'a, 'tcx>,
all_loans: &'a [Loan<'tcx>],
- param_env: &'a ty::ParameterEnvironment<'a, 'tcx>,
+ param_env: &'a ty::ParameterEnvironment<'tcx>,
}
impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> {
debug!("check_loans(body id={})", body.id);
let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
- let infcx = infer::new_infer_ctxt(bccx.tcx,
- &bccx.tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
-
+ let infcx = bccx.tcx.borrowck_fake_infer_ctxt(param_env);
let mut clcx = CheckLoanCtxt {
bccx: bccx,
dfcx_loans: dfcx_loans,
all_loans: all_loans,
param_env: &infcx.parameter_environment
};
-
- {
- let mut euv = euv::ExprUseVisitor::new(&mut clcx, &infcx);
- euv.walk_fn(decl, body);
- }
+ euv::ExprUseVisitor::new(&mut clcx, &infcx).walk_fn(decl, body);
}
#[derive(PartialEq)]
}
impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
- pub fn tcx(&self) -> &'a TyCtxt<'tcx> { self.bccx.tcx }
+ pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.bccx.tcx }
pub fn each_issued_loan<F>(&self, node: ast::NodeId, mut op: F) -> bool where
F: FnMut(&Loan<'tcx>) -> bool,
}
}
-pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
- tcx: &TyCtxt<'tcx>,
- sp: Span,
- id: ast::NodeId) {
+pub fn instrument_move_fragments<'a, 'tcx>(this: &MoveData<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ sp: Span,
+ id: ast::NodeId) {
let span_err = tcx.map.attrs(id).iter()
.any(|a| a.check_name("rustc_move_fragments"));
let print = tcx.sess.opts.debugging_opts.print_move_fragments;
///
/// Note: "left-over fragments" means paths that were not directly referenced in moves nor
/// assignments, but must nonetheless be tracked as potential drop obligations.
-pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &TyCtxt<'tcx>) {
+pub fn fixup_fragment_sets<'a, 'tcx>(this: &MoveData<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut fragments = this.fragments.borrow_mut();
/// Adds all of the precisely-tracked siblings of `lp` as potential move paths of interest. For
/// example, if `lp` represents `s.x.j`, then adds moves paths for `s.x.i` and `s.x.k`, the
/// siblings of `s.x.j`.
-fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
- tcx: &TyCtxt<'tcx>,
- gathered_fragments: &mut Vec<Fragment>,
- lp: Rc<LoanPath<'tcx>>,
- origin_id: Option<ast::NodeId>) {
+fn add_fragment_siblings<'a, 'tcx>(this: &MoveData<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ gathered_fragments: &mut Vec<Fragment>,
+ lp: Rc<LoanPath<'tcx>>,
+ origin_id: Option<ast::NodeId>) {
match lp.kind {
LpVar(_) | LpUpvar(..) => {} // Local variables have no siblings.
/// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name).
/// Based on this, add move paths for all of the siblings of `origin_lp`.
-fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
- tcx: &TyCtxt<'tcx>,
- gathered_fragments: &mut Vec<Fragment>,
- parent_lp: &Rc<LoanPath<'tcx>>,
- mc: mc::MutabilityCategory,
- origin_field_name: &mc::FieldName,
- origin_lp: &Rc<LoanPath<'tcx>>,
- origin_id: Option<ast::NodeId>,
- enum_variant_info: Option<(DefId,
- Rc<LoanPath<'tcx>>)>) {
+fn add_fragment_siblings_for_extension<'a, 'tcx>(this: &MoveData<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ gathered_fragments: &mut Vec<Fragment>,
+ parent_lp: &Rc<LoanPath<'tcx>>,
+ mc: mc::MutabilityCategory,
+ origin_field_name: &mc::FieldName,
+ origin_lp: &Rc<LoanPath<'tcx>>,
+ origin_id: Option<ast::NodeId>,
+ enum_variant_info: Option<(DefId,
+ Rc<LoanPath<'tcx>>)>) {
let parent_ty = parent_lp.to_type();
let mut add_fragment_sibling_local = |field_name, variant_did| {
/// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original
/// loan-path).
-fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
- tcx: &TyCtxt<'tcx>,
- gathered_fragments: &mut Vec<Fragment>,
- parent: Rc<LoanPath<'tcx>>,
- mc: mc::MutabilityCategory,
- new_field_name: mc::FieldName,
- origin_lp: &Rc<LoanPath<'tcx>>,
- enum_variant_did: Option<DefId>) -> MovePathIndex {
+fn add_fragment_sibling_core<'a, 'tcx>(this: &MoveData<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ gathered_fragments: &mut Vec<Fragment>,
+ parent: Rc<LoanPath<'tcx>>,
+ mc: mc::MutabilityCategory,
+ new_field_name: mc::FieldName,
+ origin_lp: &Rc<LoanPath<'tcx>>,
+ enum_variant_did: Option<DefId>)
+ -> MovePathIndex {
let opt_variant_did = match parent.kind {
LpDowncast(_, variant_did) => Some(variant_did),
LpVar(..) | LpUpvar(..) | LpExtend(..) => enum_variant_did,
use borrowck::*;
use borrowck::move_data::MoveData;
use rustc::middle::expr_use_visitor as euv;
-use rustc::infer;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
use rustc::middle::region;
use rustc::ty::{self, TyCtxt};
-use rustc::traits::ProjectionMode;
use syntax::ast;
use syntax::codemap::Span;
};
let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id);
- let infcx = infer::new_infer_ctxt(bccx.tcx,
- &bccx.tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
- {
- let mut euv = euv::ExprUseVisitor::new(&mut glcx, &infcx);
- euv.walk_fn(decl, body);
- }
+ let infcx = bccx.tcx.borrowck_fake_infer_ctxt(param_env);
+ euv::ExprUseVisitor::new(&mut glcx, &infcx).walk_fn(decl, body);
glcx.report_potential_errors();
let GatherLoanCtxt { all_loans, move_data, .. } = glcx;
req_kind: ty::BorrowKind)
-> Result<(),()> {
- let aliasability = cmt.freely_aliasable(bccx.tcx);
+ let aliasability = cmt.freely_aliasable();
debug!("check_aliasability aliasability={:?} req_kind={:?}",
aliasability, req_kind);
}
impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
- pub fn tcx(&self) -> &'a TyCtxt<'tcx> { self.bccx.tcx }
+ pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.bccx.tcx }
/// Guarantees that `cmt` is assignable, or reports an error.
fn guarantee_assignment_valid(&mut self,
/// sure the loans being taken are sound.
struct StaticInitializerCtxt<'a, 'tcx: 'a> {
bccx: &'a BorrowckCtxt<'a, 'tcx>,
+ item_id: ast::NodeId
}
impl<'a, 'tcx, 'v> Visitor<'v> for StaticInitializerCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr) {
if let hir::ExprAddrOf(mutbl, ref base) = ex.node {
- let infcx = infer::new_infer_ctxt(self.bccx.tcx,
- &self.bccx.tcx.tables,
- None,
- ProjectionMode::AnyFinal);
+ let param_env = ty::ParameterEnvironment::for_item(self.bccx.tcx,
+ self.item_id);
+ let infcx = self.bccx.tcx.borrowck_fake_infer_ctxt(param_env);
let mc = mc::MemCategorizationContext::new(&infcx);
let base_cmt = mc.cat_expr(&base).unwrap();
let borrow_kind = ty::BorrowKind::from_mutbl(mutbl);
// Check that we don't allow borrows of unsafe static items.
- if check_aliasability(self.bccx, ex.span,
- BorrowViolation(euv::AddrOf),
- base_cmt, borrow_kind).is_err() {
+ let err = check_aliasability(self.bccx, ex.span,
+ BorrowViolation(euv::AddrOf),
+ base_cmt, borrow_kind).is_err();
+ if err {
return; // reported an error, no sense in reporting more.
}
}
}
}
-pub fn gather_loans_in_static_initializer(bccx: &mut BorrowckCtxt, expr: &hir::Expr) {
+pub fn gather_loans_in_static_initializer(bccx: &mut BorrowckCtxt,
+ item_id: ast::NodeId,
+ expr: &hir::Expr) {
debug!("gather_loans_in_static_initializer(expr={:?})", expr);
let mut sicx = StaticInitializerCtxt {
- bccx: bccx
+ bccx: bccx,
+ item_id: item_id
};
sicx.visit_expr(expr);
}
-impl<'tcx> DataflowState<MoveData<'tcx>> {
- pub fn new_move_analysis(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
+impl<'a, 'tcx> DataflowState<MoveData<'tcx>> {
+ pub fn new_move_analysis(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
let move_data = MoveData::gather_moves(mir, tcx);
DataflowState::new(mir, move_data)
}
}
}
-impl<'tcx> MoveData<'tcx> {
- pub fn gather_moves(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
+impl<'a, 'tcx> MoveData<'tcx> {
+ pub fn gather_moves(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
gather_moves(mir, tcx)
}
}
Aggregate, Drop, CallFn, CallArg, Return,
}
-fn gather_moves<'tcx>(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> MoveData<'tcx> {
+fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveData<'tcx> {
use self::StmtKind as SK;
let bbs = mir.all_basic_blocks();
}
struct BlockContext<'b, 'a: 'b, 'tcx: 'a> {
- tcx: &'b TyCtxt<'tcx>,
+ tcx: TyCtxt<'b, 'tcx, 'tcx>,
moves: &'b mut Vec<MoveOut>,
builder: MovePathDataBuilder<'a, 'tcx>,
path_map: &'b mut Vec<Vec<MoveOutIndex>>,
}
let mut mbcx = MirBorrowckCtxt {
+ flow_state: DataflowState::new_move_analysis(mir, bcx.tcx),
bcx: bcx,
mir: mir,
node_id: id,
attributes: attributes,
- flow_state: DataflowState::new_move_analysis(mir, bcx.tcx),
};
for bb in mir.all_basic_blocks() {
fn visit_trait_item(&mut self, ti: &hir::TraitItem) {
if let hir::ConstTraitItem(_, Some(ref expr)) = ti.node {
- gather_loans::gather_loans_in_static_initializer(self, &expr);
+ gather_loans::gather_loans_in_static_initializer(self, ti.id, &expr);
}
intravisit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &hir::ImplItem) {
if let hir::ImplItemKind::Const(_, ref expr) = ii.node {
- gather_loans::gather_loans_in_static_initializer(self, &expr);
+ gather_loans::gather_loans_in_static_initializer(self, ii.id, &expr);
}
intravisit::walk_impl_item(self, ii);
}
}
-pub fn check_crate<'tcx>(tcx: &TyCtxt<'tcx>, mir_map: &MirMap<'tcx>) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &MirMap<'tcx>) {
let mut bccx = BorrowckCtxt {
tcx: tcx,
mir_map: Some(mir_map),
match item.node {
hir::ItemStatic(_, _, ref ex) |
hir::ItemConst(_, ref ex) => {
- gather_loans::gather_loans_in_static_initializer(this, &ex);
+ gather_loans::gather_loans_in_static_initializer(this, item.id, &ex);
}
_ => { }
}
/// Accessor for introspective clients inspecting `AnalysisData` and
/// the `BorrowckCtxt` itself , e.g. the flowgraph visualizer.
pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
mir_map: Option<&'a MirMap<'tcx>>,
fn_parts: FnParts<'a>,
cfg: &cfg::CFG)
// Type definitions
pub struct BorrowckCtxt<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
// Hacky. As we visit various fns, we have to load up the
// free-region map for each one. This map is computed by during
}
pub fn closure_to_block(closure_id: ast::NodeId,
- tcx: &TyCtxt) -> ast::NodeId {
+ tcx: TyCtxt) -> ast::NodeId {
match tcx.map.get(closure_id) {
hir_map::NodeExpr(expr) => match expr.node {
hir::ExprClosure(_, _, ref block, _) => {
}
}
-impl<'tcx> LoanPath<'tcx> {
- pub fn kill_scope(&self, tcx: &TyCtxt<'tcx>) -> region::CodeExtent {
+impl<'a, 'tcx> LoanPath<'tcx> {
+ pub fn kill_scope(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> region::CodeExtent {
match self.kind {
LpVar(local_id) => tcx.region_maps.var_scope(local_id),
LpUpvar(upvar_id) => {
db.emit();
}
- pub fn report_use_of_moved_value<'b>(&self,
- use_span: Span,
- use_kind: MovedValueUseKind,
- lp: &LoanPath<'tcx>,
- the_move: &move_data::Move,
- moved_lp: &LoanPath<'tcx>,
- _param_env: &ty::ParameterEnvironment<'b,'tcx>) {
+ pub fn report_use_of_moved_value(&self,
+ use_span: Span,
+ use_kind: MovedValueUseKind,
+ lp: &LoanPath<'tcx>,
+ the_move: &move_data::Move,
+ moved_lp: &LoanPath<'tcx>,
+ _param_env: &ty::ParameterEnvironment<'tcx>) {
let (verb, verb_participle) = match use_kind {
MovedInUse => ("use", "used"),
MovedInCapture => ("capture", "captured"),
if let Categorization::Local(local_id) = err.cmt.cat {
let span = self.tcx.map.span(local_id);
if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(span) {
- db.span_suggestion(
- span,
- &format!("to make the {} mutable, use `mut` as shown:",
- self.cmt_to_string(&err.cmt)),
- format!("mut {}", snippet));
+ if snippet != "self" {
+ db.span_suggestion(
+ span,
+ &format!("to make the {} mutable, use `mut` as shown:",
+ self.cmt_to_string(&err.cmt)),
+ format!("mut {}", snippet));
+ }
}
}
}
}
}
-fn statement_scope_span(tcx: &TyCtxt, region: ty::Region) -> Option<Span> {
+fn statement_scope_span(tcx: TyCtxt, region: ty::Region) -> Option<Span> {
match region {
ty::ReScope(scope) => {
match tcx.map.find(scope.node_id(&tcx.region_maps)) {
}
}
-impl<'tcx> MoveData<'tcx> {
+impl<'a, 'tcx> MoveData<'tcx> {
pub fn new() -> MoveData<'tcx> {
MoveData {
paths: RefCell::new(Vec::new()),
/// Returns the existing move path index for `lp`, if any, and otherwise adds a new index for
/// `lp` and any of its base paths that do not yet have an index.
- pub fn move_path(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn move_path(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
lp: Rc<LoanPath<'tcx>>) -> MovePathIndex {
match self.path_map.borrow().get(&lp) {
Some(&index) => {
}
/// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`.
- pub fn add_move(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn add_move(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
lp: Rc<LoanPath<'tcx>>,
id: ast::NodeId,
kind: MoveKind) {
/// Adds a new record for an assignment to `lp` that occurs at location `id` with the given
/// `span`.
- pub fn add_assignment(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn add_assignment(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
lp: Rc<LoanPath<'tcx>>,
assign_id: ast::NodeId,
span: Span,
/// variant `lp`, that occurs at location `pattern_id`. (One
/// should be able to recover the span info from the
/// `pattern_id` and the ast_map, I think.)
- pub fn add_variant_match(&self,
- tcx: &TyCtxt<'tcx>,
+ pub fn add_variant_match(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
lp: Rc<LoanPath<'tcx>>,
pattern_id: ast::NodeId,
base_lp: Rc<LoanPath<'tcx>>,
self.variant_matches.borrow_mut().push(variant_match);
}
- fn fixup_fragment_sets(&self, tcx: &TyCtxt<'tcx>) {
+ fn fixup_fragment_sets(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) {
fragments::fixup_fragment_sets(self, tcx)
}
/// Moves are generated by moves and killed by assignments and
/// scoping. Assignments are generated by assignment to variables and
/// killed by scoping. See `README.md` for more details.
- fn add_gen_kills(&self,
- tcx: &TyCtxt<'tcx>,
+ fn add_gen_kills(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
dfcx_moves: &mut MoveDataFlow,
dfcx_assign: &mut AssignDataFlow) {
for (i, the_move) in self.moves.borrow().iter().enumerate() {
impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
pub fn new(move_data: MoveData<'tcx>,
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
cfg: &cfg::CFG,
id_range: IdRange,
decl: &hir::FnDecl,
http://doc.rust-lang.org/stable/book/references-and-borrowing.html
"##,
+E0509: r##"
+This error occurs when an attempt is made to move out of a value whose type
+implements the `Drop` trait.
+
+Example of erroneous code:
+
+```compile_fail
+struct FancyNum {
+ num: usize
+}
+
+struct DropStruct {
+ fancy: FancyNum
+}
+
+impl Drop for DropStruct {
+ fn drop(&mut self) {
+ // Destruct DropStruct, possibly using FancyNum
+ }
+}
+
+fn main() {
+ let drop_struct = DropStruct{fancy: FancyNum{num: 5}};
+ let fancy_field = drop_struct.fancy; // Error E0509
+ println!("Fancy: {}", fancy_field.num);
+ // implicit call to `drop_struct.drop()` as drop_struct goes out of scope
+}
+```
+
+Here, we tried to move a field out of a struct of type `DropStruct` which
+implements the `Drop` trait. However, a struct cannot be dropped if one or
+more of its fields have been moved.
+
+Structs implementing the `Drop` trait have an implicit destructor that gets
+called when they go out of scope. This destructor may use the fields of the
+struct, so moving out of the struct could make it impossible to run the
+destructor. Therefore, we must think of all values whose type implements the
+`Drop` trait as single units whose fields cannot be moved.
+
+This error can be fixed by creating a reference to the fields of a struct,
+enum, or tuple using the `ref` keyword:
+
+```
+struct FancyNum {
+ num: usize
+}
+
+struct DropStruct {
+ fancy: FancyNum
+}
+
+impl Drop for DropStruct {
+ fn drop(&mut self) {
+ // Destruct DropStruct, possibly using FancyNum
+ }
+}
+
+fn main() {
+ let drop_struct = DropStruct{fancy: FancyNum{num: 5}};
+ let ref fancy_field = drop_struct.fancy; // No more errors!
+ println!("Fancy: {}", fancy_field.num);
+ // implicit call to `drop_struct.drop()` as drop_struct goes out of scope
+}
+```
+
+Note that this technique can also be used in the arms of a match expression:
+
+```
+struct FancyNum {
+ num: usize
+}
+
+enum DropEnum {
+ Fancy(FancyNum)
+}
+
+impl Drop for DropEnum {
+ fn drop(&mut self) {
+ // Destruct DropEnum, possibly using FancyNum
+ }
+}
+
+fn main() {
+    // Creates an enum of type `DropEnum`, which implements `Drop`
+ let drop_enum = DropEnum::Fancy(FancyNum{num: 10});
+ match drop_enum {
+ // Creates a reference to the inside of `DropEnum::Fancy`
+ DropEnum::Fancy(ref fancy_field) => // No error!
+ println!("It was fancy-- {}!", fancy_field.num),
+ }
+ // implicit call to `drop_enum.drop()` as drop_enum goes out of scope
+}
+```
+"##,
+
}
register_diagnostics! {
E0503, // cannot use `..` because it was mutably borrowed
E0505, // cannot move out of `..` because it is borrowed
E0508, // cannot move out of type `..`, a non-copy fixed-size array
- E0509, // cannot move out of type `..`, which defines the `Drop` trait
E0524, // two closures require unique access to `..` at the same time
}
use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor};
use rustc::middle::expr_use_visitor::{LoanCause, MutateMode};
use rustc::middle::expr_use_visitor as euv;
-use rustc::infer;
use rustc::middle::mem_categorization::{cmt};
use rustc::hir::pat_util::*;
use rustc::traits::ProjectionMode;
//NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv
pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
- pub tcx: &'a TyCtxt<'tcx>,
- pub param_env: ParameterEnvironment<'a, 'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ pub param_env: ParameterEnvironment<'tcx>,
}
#[derive(Clone, PartialEq)]
}
}
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.visit_all_items_in_krate(DepNode::MatchCheck, &mut MatchCheckCtxt {
tcx: tcx,
param_env: tcx.empty_parameter_environment(),
}
pub struct StaticInliner<'a, 'tcx: 'a> {
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub failed: bool,
pub renaming_map: Option<&'a mut FnvHashMap<(NodeId, Span), NodeId>>,
}
impl<'a, 'tcx> StaticInliner<'a, 'tcx> {
- pub fn new<'b>(tcx: &'b TyCtxt<'tcx>,
+ pub fn new<'b>(tcx: TyCtxt<'b, 'tcx, 'tcx>,
renaming_map: Option<&'b mut FnvHashMap<(NodeId, Span), NodeId>>)
-> StaticInliner<'b, 'tcx> {
StaticInliner {
PatKind::Ident(hir::BindByValue(_), _, ref sub) => {
let pat_ty = tcx.node_id_to_type(p.id);
//FIXME: (@jroesch) this code should be floated up as well
- let infcx = infer::new_infer_ctxt(cx.tcx,
- &cx.tcx.tables,
- Some(cx.param_env.clone()),
- ProjectionMode::AnyFinal);
- if infcx.type_moves_by_default(pat_ty, pat.span) {
- check_move(p, sub.as_ref().map(|p| &**p));
- }
+ cx.tcx.infer_ctxt(None, Some(cx.param_env.clone()),
+ ProjectionMode::AnyFinal).enter(|infcx| {
+ if infcx.type_moves_by_default(pat_ty, pat.span) {
+ check_move(p, sub.as_ref().map(|p| &**p));
+ }
+ });
}
PatKind::Ident(hir::BindByRef(_), _, _) => {
}
/// assign.
fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>,
guard: &hir::Expr) {
- let mut checker = MutationChecker {
- cx: cx,
- };
-
- let infcx = infer::new_infer_ctxt(cx.tcx,
- &cx.tcx.tables,
- Some(checker.cx.param_env.clone()),
- ProjectionMode::AnyFinal);
-
- let mut visitor = ExprUseVisitor::new(&mut checker, &infcx);
- visitor.walk_expr(guard);
+ cx.tcx.infer_ctxt(None, Some(cx.param_env.clone()),
+ ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut checker = MutationChecker {
+ cx: cx,
+ };
+ let mut visitor = ExprUseVisitor::new(&mut checker, &infcx);
+ visitor.walk_expr(guard);
+ });
}
-struct MutationChecker<'a, 'tcx: 'a> {
- cx: &'a MatchCheckCtxt<'a, 'tcx>,
+struct MutationChecker<'a, 'gcx: 'a> {
+ cx: &'a MatchCheckCtxt<'a, 'gcx>,
}
-impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'gcx> {
fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {}
fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {}
fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
The variable `s` has type `String`, and its use in the guard is as a variable of
type `String`. The guard code effectively executes in a separate scope to the
body of the arm, so the value would be moved into this anonymous scope and
-therefore become unavailable in the body of the arm. Although this example seems
-innocuous, the problem is most clear when considering functions that take their
-argument by value.
+therefore becomes unavailable in the body of the arm.
-```compile_fail
+The problem above can be solved by using the `ref` keyword.
+
+```
match Some("hi".to_string()) {
- Some(s) if { drop(s); false } => (),
- Some(s) => {}, // use s.
+ Some(ref s) if s.len() == 0 => {},
_ => {},
}
```
-The value would be dropped in the guard then become unavailable not only in the
-body of that arm but also in all subsequent arms! The solution is to bind by
-reference when using guards or refactor the entire expression, perhaps by
-putting the condition inside the body of the arm.
+Though this example seems innocuous and easy to solve, the problem becomes clear
+when the value is passed to functions which consume it:
+
+```compile_fail
+struct A{}
+
+impl A {
+ fn consume(self) -> usize {
+ 0
+ }
+}
+
+fn main() {
+ let a = Some(A{});
+ match a {
+ Some(y) if y.consume() > 0 => {}
+ _ => {}
+ }
+}
+```
+
+In this situation, even the `ref` keyword cannot solve it, since borrowed
+content cannot be moved. This problem cannot be solved generally. If the value
+can be cloned, here is a not-so-specific solution:
+
+```
+#[derive(Clone)]
+struct A{}
+
+impl A {
+ fn consume(self) -> usize {
+ 0
+ }
+}
+
+fn main() {
+ let a = Some(A{});
+    match a {
+ Some(ref y) if y.clone().consume() > 0 => {}
+ _ => {}
+ }
+}
+```
+
+If the value will be consumed in the pattern guard, using its clone will not
+move its ownership, so the code works.
"##,
E0009: r##"
use rustc::hir::map as ast_map;
use rustc::hir::map::blocks::FnLikeNode;
use rustc::middle::cstore::{self, InlinedItem};
-use rustc::{infer, traits};
+use rustc::traits;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::hir::pat_util::def_to_path;
use rustc::ty::{self, Ty, TyCtxt, subst};
use rustc::ty::util::IntTypeExt;
use rustc::traits::ProjectionMode;
-use rustc::middle::astconv_util::ast_ty_to_prim_ty;
use rustc::util::nodemap::NodeMap;
use rustc::lint;
}
}
-fn lookup_variant_by_id<'a>(tcx: &'a ty::TyCtxt,
- enum_def: DefId,
- variant_def: DefId)
- -> Option<&'a Expr> {
+fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ enum_def: DefId,
+ variant_def: DefId)
+ -> Option<&'tcx Expr> {
fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId)
-> Option<&'a Expr> {
for variant in variants {
///
/// `substs` is optional and is used for associated constants.
/// This generally happens in late/trans const evaluation.
-pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
+pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- substs: Option<subst::Substs<'tcx>>)
+ substs: Option<&'tcx subst::Substs<'tcx>>)
-> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)> {
if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
match tcx.map.find(node_id) {
None => None,
Some(ast_map::NodeItem(it)) => match it.node {
hir::ItemConst(ref ty, ref const_expr) => {
- Some((&const_expr, ast_ty_to_prim_ty(tcx, ty)))
+ Some((&const_expr, tcx.ast_ty_to_prim_ty(ty)))
}
_ => None
},
},
Some(ast_map::NodeImplItem(ii)) => match ii.node {
hir::ImplItemKind::Const(ref ty, ref expr) => {
- Some((&expr, ast_ty_to_prim_ty(tcx, ty)))
+ Some((&expr, tcx.ast_ty_to_prim_ty(ty)))
}
_ => None
},
let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node {
hir::ItemConst(ref ty, ref const_expr) => {
- Some((&**const_expr, ast_ty_to_prim_ty(tcx, ty)))
+ Some((&**const_expr, tcx.ast_ty_to_prim_ty(ty)))
},
_ => None
},
},
cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref ii)) => match ii.node {
hir::ImplItemKind::Const(ref ty, ref expr) => {
- Some((&**expr, ast_ty_to_prim_ty(tcx, ty)))
+ Some((&**expr, tcx.ast_ty_to_prim_ty(ty)))
},
_ => None
},
}
}
-fn inline_const_fn_from_external_crate(tcx: &TyCtxt, def_id: DefId)
- -> Option<ast::NodeId> {
+fn inline_const_fn_from_external_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> Option<ast::NodeId> {
match tcx.extern_const_fns.borrow().get(&def_id) {
Some(&ast::DUMMY_NODE_ID) => return None,
Some(&fn_id) => return Some(fn_id),
fn_id
}
-pub fn lookup_const_fn_by_id<'tcx>(tcx: &TyCtxt<'tcx>, def_id: DefId)
- -> Option<FnLikeNode<'tcx>>
+pub fn lookup_const_fn_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
+ -> Option<FnLikeNode<'tcx>>
{
let fn_id = if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
node_id
}
}
-pub fn const_expr_to_pat(tcx: &ty::TyCtxt, expr: &Expr, pat_id: ast::NodeId, span: Span)
- -> Result<P<hir::Pat>, DefId> {
+pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ expr: &Expr,
+ pat_id: ast::NodeId,
+ span: Span)
+ -> Result<P<hir::Pat>, DefId> {
let pat_ty = tcx.expr_ty(expr);
debug!("expr={:?} pat_ty={:?} pat_id={}", expr, pat_ty, pat_id);
match pat_ty.sty {
Ok(P(hir::Pat { id: expr.id, node: pat, span: span }))
}
-pub fn eval_const_expr(tcx: &TyCtxt, e: &Expr) -> ConstVal {
+pub fn eval_const_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ e: &Expr) -> ConstVal {
match eval_const_expr_partial(tcx, e, ExprTypeChecked, None) {
Ok(r) => r,
// non-const path still needs to be a fatal error, because enums are funky
/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked,
/// but a few places need to evaluate constants during type-checking, like
/// computing the length of an array. (See also the FIXME above EvalHint.)
-pub fn eval_const_expr_partial<'tcx>(tcx: &TyCtxt<'tcx>,
- e: &Expr,
- ty_hint: EvalHint<'tcx>,
- fn_args: FnArgMap) -> EvalResult {
+pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ e: &Expr,
+ ty_hint: EvalHint<'tcx>,
+ fn_args: FnArgMap) -> EvalResult {
// Try to compute the type of the expression based on the EvalHint.
// (See also the definition of EvalHint, and the FIXME above EvalHint.)
let ety = match ty_hint {
}
}
hir::ExprCast(ref base, ref target_ty) => {
- let ety = ast_ty_to_prim_ty(tcx, &target_ty).or_else(|| ety)
+ let ety = tcx.ast_ty_to_prim_ty(&target_ty).or(ety)
.unwrap_or_else(|| {
tcx.sess.span_fatal(target_ty.span,
"target type not found for const cast")
}
}
-fn infer<'tcx>(
- i: ConstInt,
- tcx: &TyCtxt<'tcx>,
- ty_hint: &ty::TypeVariants<'tcx>,
-) -> Result<ConstInt, ErrKind> {
+fn infer<'a, 'tcx>(i: ConstInt,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty_hint: &ty::TypeVariants<'tcx>)
+ -> Result<ConstInt, ErrKind> {
use syntax::ast::*;
match (ty_hint, i) {
}
}
-fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
+fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ti: &'tcx hir::TraitItem,
trait_id: DefId,
- rcvr_substs: subst::Substs<'tcx>)
+ rcvr_substs: &'tcx subst::Substs<'tcx>)
-> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)>
{
let trait_ref = ty::Binder(
- rcvr_substs.erase_regions().to_trait_ref(tcx, trait_id)
+ rcvr_substs.clone().erase_regions().to_trait_ref(tcx, trait_id)
);
debug!("resolve_trait_associated_const: trait_ref={:?}",
trait_ref);
tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id());
- let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
-
- let mut selcx = traits::SelectionContext::new(&infcx);
- let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
- trait_ref.to_poly_trait_predicate());
- let selection = match selcx.select(&obligation) {
- Ok(Some(vtable)) => vtable,
- // Still ambiguous, so give up and let the caller decide whether this
- // expression is really needed yet. Some associated constant values
- // can't be evaluated until monomorphization is done in trans.
- Ok(None) => {
- return None
- }
- Err(_) => {
- return None
- }
- };
+ tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
+ trait_ref.to_poly_trait_predicate());
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(vtable)) => vtable,
+ // Still ambiguous, so give up and let the caller decide whether this
+ // expression is really needed yet. Some associated constant values
+ // can't be evaluated until monomorphization is done in trans.
+ Ok(None) => {
+ return None
+ }
+ Err(_) => {
+ return None
+ }
+ };
- // NOTE: this code does not currently account for specialization, but when
- // it does so, it should hook into the ProjectionMode to determine when the
- // constant should resolve; this will also require plumbing through to this
- // function whether we are in "trans mode" to pick the right ProjectionMode
- // when constructing the inference context above.
- match selection {
- traits::VtableImpl(ref impl_data) => {
- match tcx.associated_consts(impl_data.impl_def_id)
- .iter().find(|ic| ic.name == ti.name) {
- Some(ic) => lookup_const_by_id(tcx, ic.def_id, None),
- None => match ti.node {
- hir::ConstTraitItem(ref ty, Some(ref expr)) => {
- Some((&*expr, ast_ty_to_prim_ty(tcx, ty)))
+ // NOTE: this code does not currently account for specialization, but when
+ // it does so, it should hook into the ProjectionMode to determine when the
+ // constant should resolve; this will also require plumbing through to this
+ // function whether we are in "trans mode" to pick the right ProjectionMode
+ // when constructing the inference context above.
+ match selection {
+ traits::VtableImpl(ref impl_data) => {
+ match tcx.associated_consts(impl_data.impl_def_id)
+ .iter().find(|ic| ic.name == ti.name) {
+ Some(ic) => lookup_const_by_id(tcx, ic.def_id, None),
+ None => match ti.node {
+ hir::ConstTraitItem(ref ty, Some(ref expr)) => {
+ Some((&*expr, tcx.ast_ty_to_prim_ty(ty)))
+ },
+ _ => None,
},
- _ => None,
- },
+ }
+ }
+ _ => {
+ span_bug!(ti.span,
+ "resolve_trait_associated_const: unexpected vtable type")
}
}
- _ => {
- span_bug!(
- ti.span,
- "resolve_trait_associated_const: unexpected vtable type")
- }
- }
+ })
}
-fn cast_const_int<'tcx>(tcx: &TyCtxt<'tcx>, val: ConstInt, ty: ty::Ty) -> CastResult {
+fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstInt, ty: ty::Ty) -> CastResult {
let v = val.to_u64_unchecked();
match ty.sty {
ty::TyBool if v == 0 => Ok(Bool(false)),
}
}
-fn cast_const_float<'tcx>(tcx: &TyCtxt<'tcx>, f: f64, ty: ty::Ty) -> CastResult {
+fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, f: f64, ty: ty::Ty) -> CastResult {
match ty.sty {
ty::TyInt(_) if f >= 0.0 => cast_const_int(tcx, Infer(f as u64), ty),
ty::TyInt(_) => cast_const_int(tcx, InferSigned(f as i64), ty),
}
}
-fn cast_const<'tcx>(tcx: &TyCtxt<'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult {
+fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult {
match val {
Integral(i) => cast_const_int(tcx, i, ty),
Bool(b) => cast_const_int(tcx, Infer(b as u64), ty),
Float(f) => cast_const_float(tcx, f, ty),
Char(c) => cast_const_int(tcx, Infer(c as u64), ty),
Function(_) => Err(UnimplementedConstVal("casting fn pointers")),
+ ByteStr(_) => match ty.sty {
+ ty::TyRawPtr(_) => {
+ Err(ErrKind::UnimplementedConstVal("casting a bytestr to a raw ptr"))
+ },
+ _ => Err(CannotCast),
+ },
_ => Err(CannotCast),
}
}
-fn lit_to_const<'tcx>(lit: &ast::LitKind,
- tcx: &TyCtxt<'tcx>,
- ty_hint: Option<Ty<'tcx>>,
- span: Span,
- ) -> Result<ConstVal, ErrKind> {
+fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty_hint: Option<Ty<'tcx>>,
+ span: Span)
+ -> Result<ConstVal, ErrKind> {
use syntax::ast::*;
use syntax::ast::LitIntType::*;
match *lit {
}
}
-pub fn compare_lit_exprs<'tcx>(tcx: &TyCtxt<'tcx>,
- a: &Expr,
- b: &Expr) -> Option<Ordering> {
+pub fn compare_lit_exprs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ a: &Expr,
+ b: &Expr) -> Option<Ordering> {
let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked, None) {
Ok(a) => a,
Err(e) => {
/// Returns the repeat count for a repeating vector expression.
-pub fn eval_repeat_count(tcx: &TyCtxt, count_expr: &hir::Expr) -> usize {
+pub fn eval_repeat_count<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ count_expr: &hir::Expr) -> usize {
let hint = UncheckedExprHint(tcx.types.usize);
match eval_const_expr_partial(tcx, count_expr, hint, None) {
Ok(Integral(Usize(count))) => {
use rustc::dep_graph::DepGraph;
use rustc::hir;
-use rustc::hir::map as hir_map;
+use rustc::hir::{map as hir_map, FreevarMap, TraitMap};
+use rustc::hir::def::DefMap;
use rustc_mir as mir;
use rustc::mir::mir_map::MirMap;
use rustc::session::{Session, CompileResult, compile_result_from_err_count};
use syntax;
use syntax_ext;
+#[derive(Clone)]
+pub struct Resolutions {
+ pub def_map: RefCell<DefMap>,
+ pub freevars: FreevarMap,
+ pub trait_map: TraitMap,
+ pub maybe_unused_trait_imports: NodeSet,
+}
+
pub fn compile_input(sess: &Session,
cstore: &CStore,
cfg: ast::CrateConfig,
time(sess.time_passes(),
"external crate/lib resolution",
- || LocalCrateReader::new(sess, &cstore, &defs, &expanded_crate, &id)
+ || LocalCrateReader::new(sess, &cstore, defs, &expanded_crate, &id)
.read_crates(&dep_graph));
- // Lower ast -> hir.
- let lcx = LoweringContext::new(sess, Some(&expanded_crate), defs);
- let hir_forest = &mut time(sess.time_passes(),
- "lowering ast -> hir",
- || hir_map::Forest::new(lower_crate(&lcx, &expanded_crate),
- dep_graph));
+ time(sess.time_passes(),
+ "early lint checks",
+ || lint::check_ast_crate(sess, &expanded_crate));
+
+ let (analysis, resolutions, mut hir_forest) = {
+ let defs = &mut *defs.borrow_mut();
+ lower_and_resolve(sess, &id, defs, &expanded_crate, dep_graph, control.make_glob_map)
+ };
// Discard MTWT tables that aren't required past lowering to HIR.
if !keep_mtwt_tables(sess) {
let arenas = ty::CtxtArenas::new();
// Construct the HIR map
+ let hir_forest = &mut hir_forest;
let hir_map = time(sess.time_passes(),
"indexing hir",
move || hir_map::map_crate(hir_forest, defs));
&arenas,
&cstore,
&hir_map,
+ &analysis,
+ &resolutions,
&expanded_crate,
&hir_map.krate(),
&id),
hir::check_attr::check_crate(sess, &expanded_crate);
});
- time(sess.time_passes(),
- "early lint checks",
- || lint::check_ast_crate(sess, &expanded_crate));
-
let opt_crate = if keep_ast(sess) {
Some(&expanded_crate)
} else {
phase_3_run_analysis_passes(sess,
hir_map,
+ analysis,
+ resolutions,
&arenas,
&id,
- control.make_glob_map,
|tcx, mir_map, analysis, result| {
{
// Eventually, we will want to track plugins.
pub expanded_crate: Option<&'a ast::Crate>,
pub hir_crate: Option<&'a hir::Crate>,
pub ast_map: Option<&'a hir_map::Map<'ast>>,
+ pub resolutions: Option<&'a Resolutions>,
pub mir_map: Option<&'b MirMap<'tcx>>,
pub analysis: Option<&'a ty::CrateAnalysis<'a>>,
- pub tcx: Option<&'b TyCtxt<'tcx>>,
+ pub tcx: Option<TyCtxt<'b, 'tcx, 'tcx>>,
pub trans: Option<&'a trans::CrateTranslation>,
}
expanded_crate: None,
hir_crate: None,
ast_map: None,
+ resolutions: None,
analysis: None,
mir_map: None,
tcx: None,
arenas: &'ast ty::CtxtArenas<'ast>,
cstore: &'a CStore,
hir_map: &'a hir_map::Map<'ast>,
+ analysis: &'a ty::CrateAnalysis,
+ resolutions: &'a Resolutions,
krate: &'a ast::Crate,
hir_crate: &'a hir::Crate,
crate_name: &'a str)
arenas: Some(arenas),
cstore: Some(cstore),
ast_map: Some(hir_map),
+ analysis: Some(analysis),
+ resolutions: Some(resolutions),
expanded_crate: Some(krate),
hir_crate: Some(hir_crate),
out_file: out_file.as_ref().map(|s| &**s),
hir_crate: &'a hir::Crate,
analysis: &'a ty::CrateAnalysis<'a>,
mir_map: Option<&'b MirMap<'tcx>>,
- tcx: &'b TyCtxt<'tcx>,
+ tcx: TyCtxt<'b, 'tcx, 'tcx>,
crate_name: &'a str)
-> CompileState<'a, 'b, 'ast, 'tcx> {
CompileState {
krate
}
+pub fn lower_and_resolve<'a>(sess: &Session,
+ id: &'a str,
+ defs: &mut hir_map::Definitions,
+ krate: &ast::Crate,
+ dep_graph: DepGraph,
+ make_glob_map: resolve::MakeGlobMap)
+ -> (ty::CrateAnalysis<'a>, Resolutions, hir_map::Forest) {
+ resolve::with_resolver(sess, defs, make_glob_map, |mut resolver| {
+ time(sess.time_passes(), "name resolution", || {
+ resolve::resolve_crate(&mut resolver, krate);
+ });
+
+ // Lower ast -> hir.
+ let hir_forest = time(sess.time_passes(), "lowering ast -> hir", || {
+ let lcx = LoweringContext::new(sess, Some(krate), &mut resolver);
+ hir_map::Forest::new(lower_crate(&lcx, krate), dep_graph)
+ });
+
+ (ty::CrateAnalysis {
+ export_map: resolver.export_map,
+ access_levels: AccessLevels::default(),
+ reachable: NodeSet(),
+ name: &id,
+ glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None },
+ }, Resolutions {
+ def_map: RefCell::new(resolver.def_map),
+ freevars: resolver.freevars,
+ trait_map: resolver.trait_map,
+ maybe_unused_trait_imports: resolver.maybe_unused_trait_imports,
+ }, hir_forest)
+ })
+}
+
/// Run the resolution, typechecking, region checking and other
/// miscellaneous analysis passes on the crate. Return various
/// structures carrying the results of the analysis.
pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
hir_map: hir_map::Map<'tcx>,
+ mut analysis: ty::CrateAnalysis,
+ resolutions: Resolutions,
arenas: &'tcx ty::CtxtArenas<'tcx>,
name: &str,
- make_glob_map: resolve::MakeGlobMap,
f: F)
-> Result<R, usize>
- where F: FnOnce(&TyCtxt<'tcx>, Option<MirMap<'tcx>>, ty::CrateAnalysis, CompileResult) -> R
+ where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>,
+ Option<MirMap<'tcx>>,
+ ty::CrateAnalysis,
+ CompileResult) -> R
{
macro_rules! try_with_f {
($e: expr, ($t: expr, $m: expr, $a: expr)) => {
})
})?;
- let resolve::CrateMap {
- def_map,
- freevars,
- maybe_unused_trait_imports,
- export_map,
- trait_map,
- glob_map,
- } = time(sess.time_passes(),
- "name resolution",
- || resolve::resolve_crate(sess, &hir_map, make_glob_map));
-
- let mut analysis = ty::CrateAnalysis {
- export_map: export_map,
- access_levels: AccessLevels::default(),
- reachable: NodeSet(),
- name: name,
- glob_map: glob_map,
- };
-
let named_region_map = time(time_passes,
"lifetime resolution",
|| middle::resolve_lifetime::krate(sess,
&hir_map,
- &def_map.borrow()))?;
+ &resolutions.def_map.borrow()))?;
time(time_passes,
"looking for entry point",
time(time_passes,
"static item recursion checking",
- || static_recursion::check_crate(sess, &def_map.borrow(), &hir_map))?;
+ || static_recursion::check_crate(sess, &resolutions.def_map.borrow(), &hir_map))?;
let index = stability::Index::new(&hir_map);
+ let trait_map = resolutions.trait_map;
TyCtxt::create_and_enter(sess,
arenas,
- def_map,
+ resolutions.def_map,
named_region_map,
hir_map,
- freevars,
- maybe_unused_trait_imports,
+ resolutions.freevars,
+ resolutions.maybe_unused_trait_imports,
region_map,
lang_items,
index,
}
/// Run the translation phase to LLVM, after which the AST and analysis can
-pub fn phase_4_translate_to_llvm<'tcx>(tcx: &TyCtxt<'tcx>,
- mut mir_map: MirMap<'tcx>,
- analysis: ty::CrateAnalysis) -> trans::CrateTranslation {
+pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ mut mir_map: MirMap<'tcx>,
+ analysis: ty::CrateAnalysis)
+ -> trans::CrateTranslation {
let time_passes = tcx.sess.time_passes();
time(time_passes,
control.after_write_deps.callback = box move |state| {
pretty::print_after_write_deps(state.session,
state.ast_map.unwrap(),
+ state.analysis.unwrap(),
+ state.resolutions.unwrap(),
state.input,
&state.expanded_crate.take().unwrap(),
state.crate_name.unwrap(),
pub use self::PpMode::*;
use self::NodesMatchingUII::*;
-use {driver, abort_on_err};
+use abort_on_err;
+use driver::{self, Resolutions};
use rustc::dep_graph::DepGraph;
use rustc::ty::{self, TyCtxt};
use rustc::session::config::Input;
use rustc_borrowck as borrowck;
use rustc_borrowck::graphviz as borrowck_dot;
-use rustc_resolve as resolve;
use rustc_mir::pretty::write_mir_pretty;
use rustc_mir::graphviz::write_mir_graphviz;
fn call_with_pp_support_hir<'tcx, A, B, F>(&self,
sess: &'tcx Session,
ast_map: &hir_map::Map<'tcx>,
+ analysis: &ty::CrateAnalysis,
+ resolutions: &Resolutions,
arenas: &'tcx ty::CtxtArenas<'tcx>,
id: &str,
payload: B,
PpmTyped => {
abort_on_err(driver::phase_3_run_analysis_passes(sess,
ast_map.clone(),
+ analysis.clone(),
+ resolutions.clone(),
arenas,
id,
- resolve::MakeGlobMap::No,
|tcx, _, _, _| {
let annotation = TypedAnnotation {
tcx: tcx,
struct TypedAnnotation<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> {
}
}
-fn print_flowgraph<'tcx, W: Write>(variants: Vec<borrowck_dot::Variant>,
- tcx: &TyCtxt<'tcx>,
- mir_map: Option<&MirMap<'tcx>>,
- code: blocks::Code,
- mode: PpFlowGraphMode,
- mut out: W)
- -> io::Result<()> {
+fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec<borrowck_dot::Variant>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ mir_map: Option<&MirMap<'tcx>>,
+ code: blocks::Code,
+ mode: PpFlowGraphMode,
+ mut out: W)
+ -> io::Result<()> {
let cfg = match code {
blocks::BlockCode(block) => cfg::CFG::new(tcx, &block),
blocks::FnLikeCode(fn_like) => cfg::CFG::new(tcx, &fn_like.body()),
pub fn print_after_write_deps<'tcx, 'a: 'tcx>(sess: &'a Session,
ast_map: &hir_map::Map<'tcx>,
+ analysis: &ty::CrateAnalysis,
+ resolutions: &Resolutions,
input: &Input,
krate: &ast::Crate,
crate_name: &str,
let _ignore = dep_graph.in_ignore();
if ppm.needs_analysis() {
- print_with_analysis(sess, ast_map, crate_name, arenas, ppm, opt_uii, ofile);
+ print_with_analysis(sess, ast_map, analysis, resolutions,
+ crate_name, arenas, ppm, opt_uii, ofile);
return;
}
let out: &mut Write = &mut out;
s.call_with_pp_support_hir(sess,
ast_map,
+ analysis,
+ resolutions,
arenas,
crate_name,
box out,
let out: &mut Write = &mut out;
s.call_with_pp_support_hir(sess,
ast_map,
+ analysis,
+ resolutions,
arenas,
crate_name,
(out,uii),
// Instead, we call that function ourselves.
fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session,
ast_map: &hir_map::Map<'tcx>,
+ analysis: &ty::CrateAnalysis,
+ resolutions: &Resolutions,
crate_name: &str,
arenas: &'tcx ty::CtxtArenas<'tcx>,
ppm: PpMode,
abort_on_err(driver::phase_3_run_analysis_passes(sess,
ast_map.clone(),
+ analysis.clone(),
+ resolutions.clone(),
arenas,
crate_name,
- resolve::MakeGlobMap::No,
|tcx, mir_map, _, _| {
match ppm {
PpmMir | PpmMirCFG => {
use driver;
use rustc::dep_graph::DepGraph;
use rustc_lint;
-use rustc_resolve as resolve;
+use rustc_resolve::MakeGlobMap;
use rustc::middle::lang_items;
use rustc::middle::free_region::FreeRegionMap;
use rustc::middle::region::{self, CodeExtent};
use syntax::parse::token;
use syntax::feature_gate::UnstableFeatures;
-use rustc::hir::lowering::{lower_crate, LoweringContext};
use rustc::hir;
-struct Env<'a, 'tcx: 'a> {
- infcx: &'a infer::InferCtxt<'a, 'tcx>,
+struct Env<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a infer::InferCtxt<'a, 'gcx, 'tcx>,
}
struct RH<'a> {
let krate = driver::assign_node_ids(&sess, krate);
let defs = &RefCell::new(hir_map::collect_definitions(&krate));
LocalCrateReader::new(&sess, &cstore, defs, &krate, "test_crate").read_crates(&dep_graph);
- let lcx = LoweringContext::new(&sess, Some(&krate), defs);
let _ignore = dep_graph.in_ignore();
- let mut hir_forest = &mut hir_map::Forest::new(lower_crate(&lcx, &krate), dep_graph.clone());
+
+ let (_, resolutions, mut hir_forest) = {
+ let (defs, dep_graph) = (&mut *defs.borrow_mut(), dep_graph.clone());
+ driver::lower_and_resolve(&sess, "test-crate", defs, &krate, dep_graph, MakeGlobMap::No)
+ };
+
let arenas = ty::CtxtArenas::new();
- let ast_map = hir_map::map_crate(hir_forest, defs);
+ let ast_map = hir_map::map_crate(&mut hir_forest, defs);
// run just enough stuff to build a tcx:
let lang_items = lang_items::collect_language_items(&sess, &ast_map);
- let resolve::CrateMap { def_map, freevars, maybe_unused_trait_imports, .. } =
- resolve::resolve_crate(&sess, &ast_map, resolve::MakeGlobMap::No);
- let named_region_map = resolve_lifetime::krate(&sess, &ast_map, &def_map.borrow());
+ let named_region_map = resolve_lifetime::krate(&sess, &ast_map, &resolutions.def_map.borrow());
let region_map = region::resolve_crate(&sess, &ast_map);
let index = stability::Index::new(&ast_map);
TyCtxt::create_and_enter(&sess,
- &arenas,
- def_map,
- named_region_map.unwrap(),
- ast_map,
- freevars,
- maybe_unused_trait_imports,
- region_map,
- lang_items,
- index,
- "test_crate",
- |tcx| {
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- None,
- ProjectionMode::AnyFinal);
- body(Env { infcx: &infcx });
- let free_regions = FreeRegionMap::new();
- infcx.resolve_regions_and_report_errors(&free_regions,
- ast::CRATE_NODE_ID);
- assert_eq!(tcx.sess.err_count(), expected_err_count);
- });
+ &arenas,
+ resolutions.def_map,
+ named_region_map.unwrap(),
+ ast_map,
+ resolutions.freevars,
+ resolutions.maybe_unused_trait_imports,
+ region_map,
+ lang_items,
+ index,
+ "test_crate",
+ |tcx| {
+ tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|infcx| {
+
+ body(Env { infcx: &infcx });
+ let free_regions = FreeRegionMap::new();
+ infcx.resolve_regions_and_report_errors(&free_regions, ast::CRATE_NODE_ID);
+ assert_eq!(tcx.sess.err_count(), expected_err_count);
+ });
+ });
}
-impl<'a, 'tcx> Env<'a, 'tcx> {
- pub fn tcx(&self) -> &TyCtxt<'tcx> {
+impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.infcx.tcx
}
}
pub fn make_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
- match infer::mk_subty(self.infcx, true, TypeOrigin::Misc(DUMMY_SP), a, b) {
+ match self.infcx.sub_types(true, TypeOrigin::Misc(DUMMY_SP), a, b) {
Ok(_) => true,
Err(ref e) => panic!("Encountered error: {}", e),
}
}
pub fn is_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
- match infer::can_mk_subty(self.infcx, a, b) {
- Ok(_) => true,
- Err(_) => false,
- }
+ self.infcx.can_sub_types(a, b).is_ok()
}
pub fn assert_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) {
pub fn t_fn(&self, input_tys: &[Ty<'tcx>], output_ty: Ty<'tcx>) -> Ty<'tcx> {
let input_args = input_tys.iter().cloned().collect();
- self.infcx.tcx.mk_fn_ptr(ty::BareFnTy {
+ self.infcx.tcx.mk_fn_ptr(self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(output_ty),
variadic: false,
}),
- })
+ }))
}
pub fn t_nil(&self) -> Ty<'tcx> {
infer::TypeTrace::dummy(self.tcx())
}
- pub fn sub(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
+ pub fn sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
let trace = self.dummy_type_trace();
- self.infcx.sub(true, trace, t1, t2)
+ self.infcx.sub(true, trace, &t1, &t2)
}
- pub fn lub(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
+ pub fn lub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
let trace = self.dummy_type_trace();
- self.infcx.lub(true, trace, t1, t2)
+ self.infcx.lub(true, trace, &t1, &t2)
}
- pub fn glb(&self, t1: &Ty<'tcx>, t2: &Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
+ pub fn glb(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
let trace = self.dummy_type_trace();
- self.infcx.glb(true, trace, t1, t2)
+ self.infcx.glb(true, trace, &t1, &t2)
}
/// Checks that `t1 <: t2` is true (this may register additional
/// region checks).
pub fn check_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) {
- match self.sub(&t1, &t2) {
+ match self.sub(t1, t2) {
Ok(InferOk { obligations, .. }) => {
// FIXME(#32730) once obligations are being propagated, assert the right thing.
assert!(obligations.is_empty());
/// Checks that `t1 <: t2` is false (this may register additional
/// region checks).
pub fn check_not_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) {
- match self.sub(&t1, &t2) {
+ match self.sub(t1, t2) {
Err(_) => {}
Ok(_) => {
panic!("unexpected success computing sub({:?},{:?})", t1, t2);
/// Checks that `LUB(t1,t2) == t_lub`
pub fn check_lub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_lub: Ty<'tcx>) {
- match self.lub(&t1, &t2) {
+ match self.lub(t1, t2) {
Ok(InferOk { obligations, value: t }) => {
// FIXME(#32730) once obligations are being propagated, assert the right thing.
assert!(obligations.is_empty());
/// Checks that `GLB(t1,t2) == t_glb`
pub fn check_glb(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_glb: Ty<'tcx>) {
debug!("check_glb(t1={}, t2={}, t_glb={})", t1, t2, t_glb);
- match self.glb(&t1, &t2) {
+ match self.glb(t1, t2) {
Err(e) => {
panic!("unexpected error computing LUB: {:?}", e)
}
const THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need";
const ID: &'static str = "id";
-pub fn assert_dep_graph(tcx: &TyCtxt) {
+pub fn assert_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _ignore = tcx.dep_graph.in_ignore();
if tcx.sess.opts.debugging_opts.dump_dep_graph {
FnvHashSet<(Span, InternedString, ast::NodeId, DepNode<DefId>)>>;
struct IfThisChanged<'a, 'tcx:'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
if_this_changed: SourceHashMap,
then_this_would_need: TargetHashMap,
}
}
}
-fn check_paths(tcx: &TyCtxt,
- if_this_changed: &SourceHashMap,
- then_this_would_need: &TargetHashMap)
+fn check_paths<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ if_this_changed: &SourceHashMap,
+ then_this_would_need: &TargetHashMap)
{
// Return early here so as not to construct the query, which is not cheap.
if if_this_changed.is_empty() {
}
}
-fn dump_graph(tcx: &TyCtxt) {
+fn dump_graph(tcx: TyCtxt) {
let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| format!("dep_graph"));
let query = tcx.dep_graph.query();
use std::hash::{Hash, SipHasher, Hasher};
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
use rustc::hir::svh::Svh;
-use rustc::ty;
+use rustc::ty::TyCtxt;
use rustc::hir::intravisit::{self, Visitor};
use self::svh_visitor::StrictVersionHashVisitor;
pub trait SvhCalculate {
/// Calculate the SVH for an entire krate.
- fn calculate_krate_hash(&self) -> Svh;
+ fn calculate_krate_hash(self) -> Svh;
/// Calculate the SVH for a particular item.
- fn calculate_item_hash(&self, def_id: DefId) -> u64;
+ fn calculate_item_hash(self, def_id: DefId) -> u64;
}
-impl<'tcx> SvhCalculate for ty::TyCtxt<'tcx> {
- fn calculate_krate_hash(&self) -> Svh {
+impl<'a, 'tcx> SvhCalculate for TyCtxt<'a, 'tcx, 'tcx> {
+ fn calculate_krate_hash(self) -> Svh {
// FIXME (#14132): This is better than it used to be, but it is still not
// ideal. We now attempt to hash only the relevant portions of the
// Crate AST as well as the top-level crate attributes. (However,
Svh::from_hash(state.finish())
}
- fn calculate_item_hash(&self, def_id: DefId) -> u64 {
+ fn calculate_item_hash(self, def_id: DefId) -> u64 {
assert!(def_id.is_local());
let mut state = SipHasher::new();
use syntax::ast::{self, Name, NodeId};
use syntax::codemap::Span;
use syntax::parse::token;
- use rustc::ty;
+ use rustc::ty::TyCtxt;
use rustc::hir;
use rustc::hir::*;
use rustc::hir::intravisit as visit;
use std::hash::{Hash, SipHasher};
pub struct StrictVersionHashVisitor<'a, 'tcx: 'a> {
- pub tcx: &'a ty::TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub st: &'a mut SipHasher,
}
impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> {
pub fn new(st: &'a mut SipHasher,
- tcx: &'a ty::TyCtxt<'tcx>)
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> Self {
StrictVersionHashVisitor { st: st, tcx: tcx }
}
use rustc::dep_graph::DepNode;
use rustc::hir::map::DefPath;
use rustc::hir::def_id::DefId;
-use rustc::ty;
+use rustc::ty::TyCtxt;
use rustc::util::nodemap::DefIdMap;
use std::fmt::{self, Debug};
DefIdDirectory { paths: vec![] }
}
- pub fn retrace(&self, tcx: &ty::TyCtxt) -> RetracedDefIdDirectory {
+ pub fn retrace(&self, tcx: TyCtxt) -> RetracedDefIdDirectory {
let ids = self.paths.iter()
.map(|path| tcx.map.retrace_path(path))
.collect();
}
pub struct DefIdDirectoryBuilder<'a,'tcx:'a> {
- tcx: &'a ty::TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
hash: DefIdMap<Option<DefPathIndex>>,
directory: DefIdDirectory,
}
impl<'a,'tcx> DefIdDirectoryBuilder<'a,'tcx> {
- pub fn new(tcx: &'a ty::TyCtxt<'tcx>) -> DefIdDirectoryBuilder<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> DefIdDirectoryBuilder<'a, 'tcx> {
DefIdDirectoryBuilder {
tcx: tcx,
hash: DefIdMap(),
use syntax::ast::{self, Attribute, MetaItem};
use syntax::attr::AttrMetaMethods;
use syntax::parse::token::InternedString;
-use rustc::ty;
+use rustc::ty::TyCtxt;
const DIRTY: &'static str = "rustc_dirty";
const CLEAN: &'static str = "rustc_clean";
const LABEL: &'static str = "label";
const CFG: &'static str = "cfg";
-pub fn check_dirty_clean_annotations(tcx: &ty::TyCtxt) {
+pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _ignore = tcx.dep_graph.in_ignore();
let query = tcx.dep_graph.query();
let krate = tcx.map.krate();
}
pub struct DirtyCleanVisitor<'a, 'tcx:'a> {
- tcx: &'a ty::TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
query: &'a DepGraphQuery<DefId>,
}
use rbml::opaque::Decoder;
use rustc::dep_graph::DepNode;
use rustc::hir::def_id::DefId;
-use rustc::ty;
+use rustc::ty::TyCtxt;
use rustc_data_structures::fnv::FnvHashSet;
use rustc_serialize::Decodable as RustcDecodable;
use std::io::Read;
/// early in compilation, before we've really done any work, but
/// actually it doesn't matter all that much.) See `README.md` for
/// more general overview.
-pub fn load_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>) {
+pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _ignore = tcx.dep_graph.in_ignore();
if let Some(dep_graph) = dep_graph_path(tcx) {
}
}
-pub fn load_dep_graph_if_exists<'tcx>(tcx: &ty::TyCtxt<'tcx>, path: &Path) {
+pub fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, path: &Path) {
if !path.exists() {
return;
}
}
}
-pub fn decode_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>, data: &[u8])
- -> Result<(), Error>
+pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ data: &[u8])
+ -> Result<(), Error>
{
// Deserialize the directory and dep-graph.
let mut decoder = Decoder::new(data, 0);
Ok(())
}
-fn initial_dirty_nodes<'tcx>(tcx: &ty::TyCtxt<'tcx>,
- hashed_items: &[SerializedHash],
- retraced: &RetracedDefIdDirectory)
- -> DirtyNodes {
+fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ hashed_items: &[SerializedHash],
+ retraced: &RetracedDefIdDirectory)
+ -> DirtyNodes {
let mut items_removed = false;
let mut dirty_nodes = FnvHashSet();
for hashed_item in hashed_items {
use calculate_svh::SvhCalculate;
use rbml::opaque::Encoder;
use rustc::dep_graph::DepNode;
-use rustc::ty;
+use rustc::ty::TyCtxt;
use rustc_serialize::{Encodable as RustcEncodable};
use std::io::{self, Cursor, Write};
use std::fs::{self, File};
use super::directory::*;
use super::util::*;
-pub fn save_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>) {
+pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _ignore = tcx.dep_graph.in_ignore();
if let Some(dep_graph) = dep_graph_path(tcx) {
}
}
-pub fn encode_dep_graph<'tcx>(tcx: &ty::TyCtxt<'tcx>,
- encoder: &mut Encoder)
- -> io::Result<()>
-{
+pub fn encode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ encoder: &mut Encoder)
+ -> io::Result<()> {
// Here we take advantage of how RBML allows us to skip around
// and encode the depgraph as a two-part structure:
//
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rustc::ty;
+use rustc::ty::TyCtxt;
use std::fs;
use std::io;
use std::path::{PathBuf, Path};
-pub fn dep_graph_path<'tcx>(tcx: &ty::TyCtxt<'tcx>) -> Option<PathBuf> {
+pub fn dep_graph_path(tcx: TyCtxt) -> Option<PathBuf> {
// For now, just save/load dep-graph from
// directory/dep_graph.rbml
tcx.sess.opts.incremental.as_ref().and_then(|incr_dir| {
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use middle::stability;
-use rustc::{cfg, infer};
+use rustc::cfg;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::adjustment;
let parameter_environment = cx.tcx.empty_parameter_environment();
// FIXME (@jroesch) should probably invert this so that the parameter env still impls this
// method
- if !ty.moves_by_default(¶meter_environment, item.span) {
+ if !ty.moves_by_default(cx.tcx, ¶meter_environment, item.span) {
return;
}
- if parameter_environment.can_type_implement_copy(ty, item.span).is_ok() {
+ if parameter_environment.can_type_implement_copy(cx.tcx, ty, item.span).is_ok() {
cx.span_lint(MISSING_COPY_IMPLEMENTATIONS,
item.span,
"type could implement `Copy`; consider adding `impl \
// Functions for identifying if the given Expr NodeId `id`
// represents a call to the function `fn_id`/method `method`.
- fn expr_refers_to_this_fn(tcx: &TyCtxt,
+ fn expr_refers_to_this_fn(tcx: TyCtxt,
fn_id: ast::NodeId,
id: ast::NodeId) -> bool {
match tcx.map.get(id) {
}
// Check if the expression `id` performs a call to `method`.
- fn expr_refers_to_this_method(tcx: &TyCtxt,
- method: &ty::Method,
- id: ast::NodeId) -> bool {
+ fn expr_refers_to_this_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ method: &ty::Method,
+ id: ast::NodeId) -> bool {
// Check for method calls and overloaded operators.
let opt_m = tcx.tables.borrow().method_map.get(&ty::MethodCall::expr(id)).cloned();
if let Some(m) = opt_m {
hir_map::NodeExpr(&hir::Expr { node: hir::ExprCall(ref callee, _), .. }) => {
match tcx.def_map.borrow().get(&callee.id).map(|d| d.full_def()) {
Some(Def::Method(def_id)) => {
- let item_substs =
- tcx.tables.borrow().item_substs
- .get(&callee.id)
- .cloned()
- .unwrap_or_else(|| ty::ItemSubsts::empty());
+ let item_substs = tcx.node_id_item_substs(callee.id);
method_call_refers_to_method(
tcx, method, def_id, &item_substs.substs, id)
}
// Check if the method call to the method with the ID `callee_id`
// and instantiated with `callee_substs` refers to method `method`.
- fn method_call_refers_to_method<'tcx>(tcx: &TyCtxt<'tcx>,
- method: &ty::Method,
- callee_id: DefId,
- callee_substs: &Substs<'tcx>,
- expr_id: ast::NodeId) -> bool {
+ fn method_call_refers_to_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ method: &ty::Method,
+ callee_id: DefId,
+ callee_substs: &Substs<'tcx>,
+ expr_id: ast::NodeId) -> bool {
let callee_item = tcx.impl_or_trait_item(callee_id);
match callee_item.container() {
// checking, so it's always local
let node_id = tcx.map.as_local_node_id(method.def_id).unwrap();
- let param_env = ty::ParameterEnvironment::for_item(tcx, node_id);
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
- let mut selcx = traits::SelectionContext::new(&infcx);
- match selcx.select(&obligation) {
- // The method comes from a `T: Trait` bound.
- // If `T` is `Self`, then this call is inside
- // a default method definition.
- Ok(Some(traits::VtableParam(_))) => {
- let self_ty = callee_substs.self_ty();
- let on_self = self_ty.map_or(false, |t| t.is_self());
- // We can only be recurring in a default
- // method if we're being called literally
- // on the `Self` type.
- on_self && callee_id == method.def_id
- }
+ let param_env = Some(ty::ParameterEnvironment::for_item(tcx, node_id));
+ tcx.infer_ctxt(None, param_env, ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ match selcx.select(&obligation) {
+ // The method comes from a `T: Trait` bound.
+ // If `T` is `Self`, then this call is inside
+ // a default method definition.
+ Ok(Some(traits::VtableParam(_))) => {
+ let self_ty = callee_substs.self_ty();
+ let on_self = self_ty.map_or(false, |t| t.is_self());
+ // We can only be recurring in a default
+ // method if we're being called literally
+ // on the `Self` type.
+ on_self && callee_id == method.def_id
+ }
- // The `impl` is known, so we check that with a
- // special case:
- Ok(Some(traits::VtableImpl(vtable_impl))) => {
- let container = ty::ImplContainer(vtable_impl.impl_def_id);
- // It matches if it comes from the same impl,
- // and has the same method name.
- container == method.container
- && callee_item.name() == method.name
- }
+ // The `impl` is known, so we check that with a
+ // special case:
+ Ok(Some(traits::VtableImpl(vtable_impl))) => {
+ let container = ty::ImplContainer(vtable_impl.impl_def_id);
+ // It matches if it comes from the same impl,
+ // and has the same method name.
+ container == method.container
+ && callee_item.name() == method.name
+ }
- // There's no way to know if this call is
- // recursive, so we assume it's not.
- _ => return false
- }
+ // There's no way to know if this call is
+ // recursive, so we assume it's not.
+ _ => false
+ }
+ })
}
}
}
#![allow(non_snake_case)]
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt};
use middle::const_val::ConstVal;
}
}
- fn check_limits(tcx: &TyCtxt, binop: hir::BinOp,
- l: &hir::Expr, r: &hir::Expr) -> bool {
+ fn check_limits<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ binop: hir::BinOp,
+ l: &hir::Expr,
+ r: &hir::Expr) -> bool {
let (lit, expr, swap) = match (&l.node, &r.node) {
(&hir::ExprLit(_), _) => (l, r, true),
(_, &hir::ExprLit(_)) => (r, l, false),
/// to function pointers and references, but could be
/// expanded to cover NonZero raw pointers and newtypes.
/// FIXME: This duplicates code in trans.
-fn is_repr_nullable_ptr<'tcx>(tcx: &TyCtxt<'tcx>,
- def: ty::AdtDef<'tcx>,
- substs: &Substs<'tcx>)
- -> bool {
+fn is_repr_nullable_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def: ty::AdtDef<'tcx>,
+ substs: &Substs<'tcx>)
+ -> bool {
if def.variants.len() == 2 {
let data_idx;
ty: Ty<'tcx>)
-> FfiResult {
use self::FfiResult::*;
- let cx = &self.cx.tcx;
+ let cx = self.cx.tcx;
// Protect against infinite recursion, for example
// `struct S(*mut S);`.
}
for field in &def.struct_variant().fields {
- let field_ty = infer::normalize_associated_type(cx, &field.ty(cx, substs));
+ let field_ty = cx.normalize_associated_type(&field.ty(cx, substs));
let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {}
// Check the contained variants.
for variant in &def.variants {
for field in &variant.fields {
- let arg = infer::normalize_associated_type(cx, &field.ty(cx, substs));
+ let arg = cx.normalize_associated_type(&field.ty(cx, substs));
let r = self.check_type_for_ffi(cache, arg);
match r {
FfiSafe => {}
fn check_type_for_ffi_and_report_errors(&mut self, sp: Span, ty: Ty<'tcx>) {
// it is only OK to use this function because extern fns cannot have
// any generic types right now:
- let ty = infer::normalize_associated_type(self.cx.tcx, &ty);
+ let ty = self.cx.tcx.normalize_associated_type(&ty);
match self.check_type_for_ffi(&mut FnvHashSet(), ty) {
FfiResult::FfiSafe => {}
#[cfg(test)] use syntax::parse;
#[cfg(test)] use syntax::ast::NodeId;
#[cfg(test)] use rustc::hir::print as pprust;
-#[cfg(test)] use rustc::hir::lowering::{lower_item, LoweringContext};
+#[cfg(test)] use rustc::hir::lowering::{lower_item, LoweringContext, DummyResolver};
struct DecodeContext<'a, 'b, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
cdata: &'b cstore::crate_metadata,
from_id_range: IdRange,
to_id_range: IdRange,
/// Decodes an item from its AST in the cdata's metadata and adds it to the
/// ast-map.
-pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata,
- tcx: &TyCtxt<'tcx>,
- parent_def_path: ast_map::DefPath,
- parent_did: DefId,
- ast_doc: rbml::Doc,
- orig_did: DefId)
- -> &'tcx InlinedItem {
+pub fn decode_inlined_item<'a, 'tcx>(cdata: &cstore::crate_metadata,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ parent_def_path: ast_map::DefPath,
+ parent_did: DefId,
+ ast_doc: rbml::Doc,
+ orig_did: DefId)
+ -> &'tcx InlinedItem {
debug!("> Decoding inlined fn: {:?}", tcx.item_path_str(orig_did));
let mut ast_dsr = reader::Decoder::new(ast_doc);
let from_id_range = Decodable::decode(&mut ast_dsr).unwrap();
match *self {
Def::Fn(did) => Def::Fn(did.tr(dcx)),
Def::Method(did) => Def::Method(did.tr(dcx)),
- Def::SelfTy(opt_did, impl_ids) => { Def::SelfTy(opt_did.map(|did| did.tr(dcx)),
- impl_ids.map(|(nid1, nid2)| {
- (dcx.tr_id(nid1),
- dcx.tr_id(nid2))
- })) }
+ Def::SelfTy(opt_did, impl_id) => { Def::SelfTy(opt_did.map(|did| did.tr(dcx)),
+ impl_id.map(|id| dcx.tr_id(id))) }
Def::Mod(did) => { Def::Mod(did.tr(dcx)) }
Def::ForeignMod(did) => { Def::ForeignMod(did.tr(dcx)) }
Def::Static(did, m) => { Def::Static(did.tr(dcx), m) }
// Versions of the type reading functions that don't need the full
// DecodeContext.
- fn read_ty_nodcx(&mut self,
- tcx: &TyCtxt<'tcx>, cdata: &cstore::crate_metadata) -> Ty<'tcx>;
- fn read_tys_nodcx(&mut self,
- tcx: &TyCtxt<'tcx>,
- cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>>;
- fn read_substs_nodcx(&mut self, tcx: &TyCtxt<'tcx>,
- cdata: &cstore::crate_metadata)
- -> subst::Substs<'tcx>;
+ fn read_ty_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata) -> Ty<'tcx>;
+ fn read_tys_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>>;
+ fn read_substs_nodcx<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata)
+ -> subst::Substs<'tcx>;
}
impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
- fn read_ty_nodcx(&mut self,
- tcx: &TyCtxt<'tcx>,
- cdata: &cstore::crate_metadata)
- -> Ty<'tcx> {
+ fn read_ty_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata)
+ -> Ty<'tcx> {
self.read_opaque(|_, doc| {
Ok(
tydecode::TyDecoder::with_doc(tcx, cdata.cnum, doc,
}).unwrap()
}
- fn read_tys_nodcx(&mut self,
- tcx: &TyCtxt<'tcx>,
- cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>> {
+ fn read_tys_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata) -> Vec<Ty<'tcx>> {
self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) )
.unwrap()
.into_iter()
.collect()
}
- fn read_substs_nodcx(&mut self,
- tcx: &TyCtxt<'tcx>,
- cdata: &cstore::crate_metadata)
- -> subst::Substs<'tcx>
+ fn read_substs_nodcx<'b>(&mut self, tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ cdata: &cstore::crate_metadata)
+ -> subst::Substs<'tcx>
{
self.read_opaque(|_, doc| {
Ok(
}
c::tag_table_item_subst => {
let item_substs = ty::ItemSubsts {
- substs: val_dsr.read_substs(dcx)
+ substs: dcx.tcx.mk_substs(val_dsr.read_substs(dcx))
};
dcx.tcx.tables.borrow_mut().item_substs.insert(
id, item_substs);
parse::ParseSess::new()
}
+#[cfg(test)]
+fn with_testing_context<T, F: FnOnce(LoweringContext) -> T>(f: F) -> T {
+ let assigner = FakeNodeIdAssigner;
+ let mut resolver = DummyResolver;
+ let lcx = LoweringContext::new(&assigner, None, &mut resolver);
+ f(lcx)
+}
+
#[cfg(test)]
fn roundtrip(in_item: hir::Item) {
let mut wr = Cursor::new(Vec::new());
#[test]
fn test_basic() {
let cx = mk_ctxt();
- let fnia = FakeNodeIdAssigner;
- let lcx = LoweringContext::testing_context(&fnia);
- roundtrip(lower_item(&lcx, "e_item!(&cx,
- fn foo() {}
- ).unwrap()));
+ with_testing_context(|lcx| {
+ roundtrip(lower_item(&lcx, "e_item!(&cx,
+ fn foo() {}
+ ).unwrap()));
+ });
}
#[test]
fn test_smalltalk() {
let cx = mk_ctxt();
- let fnia = FakeNodeIdAssigner;
- let lcx = LoweringContext::testing_context(&fnia);
- roundtrip(lower_item(&lcx, "e_item!(&cx,
- fn foo() -> isize { 3 + 4 } // first smalltalk program ever executed.
- ).unwrap()));
+ with_testing_context(|lcx| {
+ roundtrip(lower_item(&lcx, "e_item!(&cx,
+ fn foo() -> isize { 3 + 4 } // first smalltalk program ever executed.
+ ).unwrap()));
+ });
}
#[test]
fn test_more() {
let cx = mk_ctxt();
- let fnia = FakeNodeIdAssigner;
- let lcx = LoweringContext::testing_context(&fnia);
- roundtrip(lower_item(&lcx, "e_item!(&cx,
- fn foo(x: usize, y: usize) -> usize {
- let z = x + y;
- return z;
- }
- ).unwrap()));
+ with_testing_context(|lcx| {
+ roundtrip(lower_item(&lcx, "e_item!(&cx,
+ fn foo(x: usize, y: usize) -> usize {
+ let z = x + y;
+ return z;
+ }
+ ).unwrap()));
+ });
}
#[test]
return alist {eq_fn: eq_int, data: Vec::new()};
}
).unwrap();
- let fnia = FakeNodeIdAssigner;
- let lcx = LoweringContext::testing_context(&fnia);
- let hir_item = lower_item(&lcx, &item);
- let item_in = InlinedItemRef::Item(&hir_item);
- let item_out = simplify_ast(item_in);
- let item_exp = InlinedItem::Item(P(lower_item(&lcx, "e_item!(&cx,
- fn new_int_alist<B>() -> alist<isize, B> {
- return alist {eq_fn: eq_int, data: Vec::new()};
+ let cx = mk_ctxt();
+ with_testing_context(|lcx| {
+ let hir_item = lower_item(&lcx, &item);
+ let item_in = InlinedItemRef::Item(&hir_item);
+ let item_out = simplify_ast(item_in);
+ let item_exp = InlinedItem::Item(P(lower_item(&lcx, "e_item!(&cx,
+ fn new_int_alist<B>() -> alist<isize, B> {
+ return alist {eq_fn: eq_int, data: Vec::new()};
+ }
+ ).unwrap())));
+ match (item_out, item_exp) {
+ (InlinedItem::Item(item_out), InlinedItem::Item(item_exp)) => {
+ assert!(pprust::item_to_string(&item_out) ==
+ pprust::item_to_string(&item_exp));
+ }
+ _ => bug!()
}
- ).unwrap())));
- match (item_out, item_exp) {
- (InlinedItem::Item(item_out), InlinedItem::Item(item_exp)) => {
- assert!(pprust::item_to_string(&item_out) ==
- pprust::item_to_string(&item_exp));
- }
- _ => bug!()
- }
+ });
}
option_env!("CFG_VERSION").unwrap_or("unknown version")
)
}
+
+pub const tag_panic_strategy: usize = 0x114;
use rustc::hir::svh::Svh;
use rustc::dep_graph::{DepGraph, DepNode};
use rustc::session::{config, Session};
+use rustc::session::config::PanicStrategy;
use rustc::session::search_paths::PathKind;
use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate};
use rustc::util::nodemap::FnvHashMap;
}
}
+ fn inject_panic_runtime(&mut self, krate: &ast::Crate) {
+ // If we're only compiling an rlib, then there's no need to select a
+ // panic runtime, so we just skip this section entirely.
+ let any_non_rlib = self.sess.crate_types.borrow().iter().any(|ct| {
+ *ct != config::CrateTypeRlib
+ });
+ if !any_non_rlib {
+ info!("panic runtime injection skipped, only generating rlib");
+ return
+ }
+
+ // If we need a panic runtime, we try to find an existing one here. At
+ // the same time we perform some general validation of the DAG we've got
+ // going, such as ensuring everything has a compatible panic strategy.
+ //
+ // The logic for finding the panic runtime here is pretty much the same
+ // as the allocator case with the only addition that the panic strategy
+ // compilation mode also comes into play.
+ let desired_strategy = self.sess.opts.cg.panic.clone();
+ let mut runtime_found = false;
+ let mut needs_panic_runtime = attr::contains_name(&krate.attrs,
+ "needs_panic_runtime");
+ self.cstore.iter_crate_data(|cnum, data| {
+ needs_panic_runtime = needs_panic_runtime || data.needs_panic_runtime();
+ if data.is_panic_runtime() {
+ // Inject a dependency from all #![needs_panic_runtime] to this
+ // #![panic_runtime] crate.
+ self.inject_dependency_if(cnum, "a panic runtime",
+ &|data| data.needs_panic_runtime());
+ runtime_found = runtime_found || data.explicitly_linked.get();
+ }
+ });
+
+ // If an explicitly linked and matching panic runtime was found, or if
+ // we just don't need one at all, then we're done here and there's
+ // nothing else to do.
+ if !needs_panic_runtime || runtime_found {
+ return
+ }
+
+ // By this point we know that we (a) need a panic runtime and (b) no
+ // panic runtime was explicitly linked. Here we just load an appropriate
+ // default runtime for our panic strategy and then inject the
+ // dependencies.
+ //
+ // We may resolve to an already loaded crate (as the crate may not have
+ // been explicitly linked prior to this) and we may re-inject
+ // dependencies again, but both of those situations are fine.
+ //
+ // Also note that we have yet to perform validation of the crate graph
+ // in terms of ensuring everyone has a compatible panic runtime; that's
+ // performed later as part of the `dependency_format` module.
+ let name = match desired_strategy {
+ PanicStrategy::Unwind => "panic_unwind",
+ PanicStrategy::Abort => "panic_abort",
+ };
+ info!("panic runtime not found -- loading {}", name);
+
+ let (cnum, data, _) = self.resolve_crate(&None, name, name, None,
+ codemap::DUMMY_SP,
+ PathKind::Crate, false);
+
+ // Sanity check the loaded crate to ensure it is indeed a panic runtime
+ // and the panic strategy is indeed what we thought it was.
+ if !data.is_panic_runtime() {
+ self.sess.err(&format!("the crate `{}` is not a panic runtime",
+ name));
+ }
+ if data.panic_strategy() != desired_strategy {
+ self.sess.err(&format!("the crate `{}` does not have the panic \
+ strategy `{}`",
+ name, desired_strategy.desc()));
+ }
+
+ self.sess.injected_panic_runtime.set(Some(cnum));
+ self.inject_dependency_if(cnum, "a panic runtime",
+ &|data| data.needs_panic_runtime());
+ }
+
fn inject_allocator_crate(&mut self) {
// Make sure that we actually need an allocator, if none of our
// dependencies need one then we definitely don't!
self.cstore.iter_crate_data(|cnum, data| {
needs_allocator = needs_allocator || data.needs_allocator();
if data.is_allocator() {
- debug!("{} required by rlib and is an allocator", data.name());
- self.inject_allocator_dependency(cnum);
+ info!("{} required by rlib and is an allocator", data.name());
+ self.inject_dependency_if(cnum, "an allocator",
+ &|data| data.needs_allocator());
found_required_allocator = found_required_allocator ||
data.explicitly_linked.get();
}
codemap::DUMMY_SP,
PathKind::Crate, false);
- // To ensure that the `-Z allocation-crate=foo` option isn't abused, and
- // to ensure that the allocator is indeed an allocator, we verify that
- // the crate loaded here is indeed tagged #![allocator].
+ // Sanity check the crate we loaded to ensure that it is indeed an
+ // allocator.
if !data.is_allocator() {
self.sess.err(&format!("the allocator crate `{}` is not tagged \
with #![allocator]", data.name()));
}
self.sess.injected_allocator.set(Some(cnum));
- self.inject_allocator_dependency(cnum);
+ self.inject_dependency_if(cnum, "an allocator",
+ &|data| data.needs_allocator());
}
- fn inject_allocator_dependency(&self, allocator: ast::CrateNum) {
+ fn inject_dependency_if(&self,
+ krate: ast::CrateNum,
+ what: &str,
+ needs_dep: &Fn(&cstore::crate_metadata) -> bool) {
+ // Don't perform this validation if the session has errors, as one of
+ // those errors may indicate a circular dependency which could cause
+ // this to stack overflow.
+ if self.sess.has_errors() {
+ return
+ }
+
// Before we inject any dependencies, make sure we don't inject a
- // circular dependency by validating that this allocator crate doesn't
- // transitively depend on any `#![needs_allocator]` crates.
- validate(self, allocator, allocator);
-
- // All crates tagged with `needs_allocator` do not explicitly depend on
- // the allocator selected for this compile, but in order for this
- // compilation to be successfully linked we need to inject a dependency
- // (to order the crates on the command line correctly).
- //
- // Here we inject a dependency from all crates with #![needs_allocator]
- // to the crate tagged with #![allocator] for this compilation unit.
+ // circular dependency by validating that this crate doesn't
+ // transitively depend on any crates satisfying `needs_dep`.
+ validate(self, krate, krate, what, needs_dep);
+
+ // All crates satisfying `needs_dep` do not explicitly depend on the
+ // crate provided for this compile, but in order for this compilation to
+ // be successfully linked we need to inject a dependency (to order the
+ // crates on the command line correctly).
self.cstore.iter_crate_data(|cnum, data| {
- if !data.needs_allocator() {
+ if !needs_dep(data) {
return
}
- info!("injecting a dep from {} to {}", cnum, allocator);
+ info!("injecting a dep from {} to {}", cnum, krate);
let mut cnum_map = data.cnum_map.borrow_mut();
let remote_cnum = cnum_map.len() + 1;
- let prev = cnum_map.insert(remote_cnum as ast::CrateNum, allocator);
+ let prev = cnum_map.insert(remote_cnum as ast::CrateNum, krate);
assert!(prev.is_none());
});
- fn validate(me: &CrateReader, krate: ast::CrateNum,
- allocator: ast::CrateNum) {
+ fn validate(me: &CrateReader,
+ krate: ast::CrateNum,
+ root: ast::CrateNum,
+ what: &str,
+ needs_dep: &Fn(&cstore::crate_metadata) -> bool) {
let data = me.cstore.get_crate_data(krate);
- if data.needs_allocator() {
+ if needs_dep(&data) {
let krate_name = data.name();
- let data = me.cstore.get_crate_data(allocator);
- let alloc_name = data.name();
- me.sess.err(&format!("the allocator crate `{}` cannot depend \
- on a crate that needs an allocator, but \
- it depends on `{}`", alloc_name,
+ let data = me.cstore.get_crate_data(root);
+ let root_name = data.name();
+ me.sess.err(&format!("the crate `{}` cannot depend \
+ on a crate that needs {}, but \
+ it depends on `{}`", root_name, what,
krate_name));
}
for (_, &dep) in data.cnum_map.borrow().iter() {
- validate(me, dep, allocator);
+ validate(me, dep, root, what, needs_dep);
}
}
}
self.process_crate(self.krate);
visit::walk_crate(self, self.krate);
self.creader.inject_allocator_crate();
+ self.creader.inject_panic_runtime(self.krate);
if log_enabled!(log::INFO) {
dump_crates(&self.cstore);
use rustc::mir::repr::Mir;
use rustc::mir::mir_map::MirMap;
use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
+use rustc::session::config::PanicStrategy;
use std::cell::RefCell;
use std::rc::Rc;
decoder::get_visibility(&cdata, def.index)
}
- fn closure_kind(&self, _tcx: &TyCtxt<'tcx>, def_id: DefId) -> ty::ClosureKind
+ fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind
{
assert!(!def_id.is_local());
let cdata = self.get_crate_data(def_id.krate);
decoder::closure_kind(&cdata, def_id.index)
}
- fn closure_ty(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx>
+ fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx>
{
assert!(!def_id.is_local());
let cdata = self.get_crate_data(def_id.krate);
decoder::get_repr_attrs(&cdata, def.index)
}
- fn item_type(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::TypeScheme<'tcx>
+ fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::TypeScheme<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_type(&cdata, def.index, tcx)
}
- fn item_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx>
+ fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_predicates(&cdata, def.index, tcx)
}
- fn item_super_predicates(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> ty::GenericPredicates<'tcx>
+ fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> ty::GenericPredicates<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_super_predicates(&cdata, def.index, tcx)
decoder::get_symbol(&cdata, def.index)
}
- fn trait_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::TraitDef<'tcx>
+ fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::TraitDef<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_trait_def(&cdata, def.index, tcx)
}
- fn adt_def(&self, tcx: &TyCtxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
+ fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_adt_def(&self.intr, &cdata, def.index, tcx)
result
}
- fn provided_trait_methods(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::Method<'tcx>>>
+ fn provided_trait_methods<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::Method<'tcx>>>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_provided_trait_methods(self.intr.clone(), &cdata, def.index, tcx)
decoder::get_impl_polarity(&cdata, def.index)
}
- fn impl_trait_ref(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::TraitRef<'tcx>>
+ fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::TraitRef<'tcx>>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_impl_trait(&cdata, def.index, tcx)
}
// FIXME: killme
- fn associated_consts(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
+ fn associated_consts<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
let cdata = self.get_crate_data(def.krate);
decoder::get_associated_consts(self.intr.clone(), &cdata, def.index, tcx)
}
decoder::get_parent_impl(&*cdata, impl_def.index)
}
- fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId) -> Option<DefId>
+ fn trait_of_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option<DefId>
{
let cdata = self.get_crate_data(def_id.krate);
decoder::get_trait_of_item(&cdata, def_id.index, tcx)
}
- fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<ty::ImplOrTraitItem<'tcx>>
+ fn impl_or_trait_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<ty::ImplOrTraitItem<'tcx>>
{
let cdata = self.get_crate_data(def.krate);
decoder::get_impl_or_trait_item(
decoder::is_default_impl(&cdata, impl_did.index)
}
- fn is_extern_item(&self, tcx: &TyCtxt<'tcx>, did: DefId) -> bool {
+ fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool {
let cdata = self.get_crate_data(did.krate);
decoder::is_extern_item(&cdata, did.index, tcx)
}
self.get_crate_data(cnum).is_allocator()
}
+ fn is_panic_runtime(&self, cnum: ast::CrateNum) -> bool
+ {
+ self.get_crate_data(cnum).is_panic_runtime()
+ }
+
+ fn panic_strategy(&self, cnum: ast::CrateNum) -> PanicStrategy {
+ self.get_crate_data(cnum).panic_strategy()
+ }
+
fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec<ast::Attribute>
{
decoder::get_crate_attributes(self.get_crate_data(cnum).data())
result
}
- fn maybe_get_item_ast(&'tcx self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> FoundAst<'tcx>
+ fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> FoundAst<'tcx>
{
let cdata = self.get_crate_data(def.krate);
decoder::maybe_get_item_ast(&cdata, tcx, def.index)
}
- fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
- -> Option<Mir<'tcx>> {
+ fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> Option<Mir<'tcx>> {
let cdata = self.get_crate_data(def.krate);
decoder::maybe_get_item_mir(&cdata, tcx, def.index)
}
{
loader::meta_section_name(target)
}
- fn encode_type(&self,
- tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
- -> Vec<u8>
+ fn encode_type<'a>(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String)
+ -> Vec<u8>
{
encoder::encoded_ty(tcx, ty, def_id_to_string)
}
self.do_extern_mod_stmt_cnum(emod_id)
}
- fn encode_metadata(&self,
- tcx: &TyCtxt<'tcx>,
- reexports: &def::ExportMap,
- item_symbols: &RefCell<NodeMap<String>>,
- link_meta: &LinkMeta,
- reachable: &NodeSet,
- mir_map: &MirMap<'tcx>,
- krate: &hir::Crate) -> Vec<u8>
+ fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ reexports: &def::ExportMap,
+ item_symbols: &RefCell<NodeMap<String>>,
+ link_meta: &LinkMeta,
+ reachable: &NodeSet,
+ mir_map: &MirMap<'tcx>,
+ krate: &hir::Crate) -> Vec<u8>
{
let ecx = encoder::EncodeContext {
diag: tcx.sess.diagnostic(),
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
use rustc::middle::cstore::{ExternCrate};
+use rustc::session::config::PanicStrategy;
use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
use std::cell::{RefCell, Ref, Cell};
let attrs = decoder::get_crate_attributes(self.data());
attr::contains_name(&attrs, "needs_allocator")
}
+
+ pub fn is_panic_runtime(&self) -> bool {
+ let attrs = decoder::get_crate_attributes(self.data());
+ attr::contains_name(&attrs, "panic_runtime")
+ }
+
+ pub fn needs_panic_runtime(&self) -> bool {
+ let attrs = decoder::get_crate_attributes(self.data());
+ attr::contains_name(&attrs, "needs_panic_runtime")
+ }
+
+ pub fn panic_strategy(&self) -> PanicStrategy {
+ decoder::get_panic_strategy(self.data())
+ }
}
impl MetadataBlob {
use rustc::hir::map as hir_map;
use rustc::util::nodemap::FnvHashMap;
use rustc::hir;
+use rustc::session::config::PanicStrategy;
use middle::cstore::{LOCAL_CRATE, FoundAst, InlinedItem, LinkagePreference};
use middle::cstore::{DefLike, DlDef, DlField, DlImpl, tls};
})
}
-fn doc_type<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
+fn doc_type<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) -> Ty<'tcx> {
let tp = reader::get_doc(doc, tag_items_data_item_type);
TyDecoder::with_doc(tcx, cdata.cnum, tp,
&mut |did| translate_def_id(cdata, did))
.parse_ty()
}
-fn maybe_doc_type<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Option<Ty<'tcx>> {
+fn maybe_doc_type<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd)
+ -> Option<Ty<'tcx>> {
reader::maybe_get_doc(doc, tag_items_data_item_type).map(|tp| {
TyDecoder::with_doc(tcx, cdata.cnum, tp,
&mut |did| translate_def_id(cdata, did))
})
}
-pub fn item_type<'tcx>(_item_id: DefId, item: rbml::Doc,
- tcx: &TyCtxt<'tcx>, cdata: Cmd) -> Ty<'tcx> {
+pub fn item_type<'a, 'tcx>(_item_id: DefId, item: rbml::Doc,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd) -> Ty<'tcx> {
doc_type(item, tcx, cdata)
}
-fn doc_trait_ref<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd)
- -> ty::TraitRef<'tcx> {
+fn doc_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd)
+ -> ty::TraitRef<'tcx> {
TyDecoder::with_doc(tcx, cdata.cnum, doc,
&mut |did| translate_def_id(cdata, did))
.parse_trait_ref()
}
-fn item_trait_ref<'tcx>(doc: rbml::Doc, tcx: &TyCtxt<'tcx>, cdata: Cmd)
- -> ty::TraitRef<'tcx> {
+fn item_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: Cmd)
+ -> ty::TraitRef<'tcx> {
let tp = reader::get_doc(doc, tag_item_trait_ref);
doc_trait_ref(tp, tcx, cdata)
}
.collect()
}
-pub fn get_trait_def<'tcx>(cdata: Cmd,
- item_id: DefIndex,
- tcx: &TyCtxt<'tcx>) -> ty::TraitDef<'tcx>
+pub fn get_trait_def<'a, 'tcx>(cdata: Cmd,
+ item_id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::TraitDef<'tcx>
{
let item_doc = cdata.lookup_item(item_id);
let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics);
associated_type_names)
}
-pub fn get_adt_def<'tcx>(intr: &IdentInterner,
- cdata: Cmd,
- item_id: DefIndex,
- tcx: &TyCtxt<'tcx>) -> ty::AdtDefMaster<'tcx>
+pub fn get_adt_def<'a, 'tcx>(intr: &IdentInterner,
+ cdata: Cmd,
+ item_id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::AdtDefMaster<'tcx>
{
fn expect_variant_kind(family: Family) -> ty::VariantKind {
match family_to_variant_kind(family) {
adt
}
-pub fn get_predicates<'tcx>(cdata: Cmd,
- item_id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> ty::GenericPredicates<'tcx>
+pub fn get_predicates<'a, 'tcx>(cdata: Cmd,
+ item_id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::GenericPredicates<'tcx>
{
let item_doc = cdata.lookup_item(item_id);
doc_predicates(item_doc, tcx, cdata, tag_item_generics)
}
-pub fn get_super_predicates<'tcx>(cdata: Cmd,
- item_id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> ty::GenericPredicates<'tcx>
+pub fn get_super_predicates<'a, 'tcx>(cdata: Cmd,
+ item_id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::GenericPredicates<'tcx>
{
let item_doc = cdata.lookup_item(item_id);
doc_predicates(item_doc, tcx, cdata, tag_item_super_predicates)
}
-pub fn get_type<'tcx>(cdata: Cmd, id: DefIndex, tcx: &TyCtxt<'tcx>)
- -> ty::TypeScheme<'tcx>
+pub fn get_type<'a, 'tcx>(cdata: Cmd, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::TypeScheme<'tcx>
{
let item_doc = cdata.lookup_item(id);
let t = item_type(DefId { krate: cdata.cnum, index: id }, item_doc, tcx,
})
}
-pub fn get_impl_trait<'tcx>(cdata: Cmd,
- id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> Option<ty::TraitRef<'tcx>>
+pub fn get_impl_trait<'a, 'tcx>(cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> Option<ty::TraitRef<'tcx>>
{
let item_doc = cdata.lookup_item(id);
let fam = item_family(item_doc);
item_name(intr, cdata.lookup_item(id))
}
-pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, tcx: &TyCtxt<'tcx>, id: DefIndex)
- -> FoundAst<'tcx> {
+pub fn maybe_get_item_ast<'a, 'tcx>(cdata: Cmd, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex)
+ -> FoundAst<'tcx> {
debug!("Looking up item: {:?}", id);
let item_doc = cdata.lookup_item(id);
let item_did = item_def_id(item_doc, cdata);
false
}
-pub fn maybe_get_item_mir<'tcx>(cdata: Cmd,
- tcx: &TyCtxt<'tcx>,
- id: DefIndex)
- -> Option<mir::repr::Mir<'tcx>> {
+pub fn maybe_get_item_mir<'a, 'tcx>(cdata: Cmd,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ id: DefIndex)
+ -> Option<mir::repr::Mir<'tcx>> {
let item_doc = cdata.lookup_item(id);
return reader::maybe_get_doc(item_doc, tag_mir as usize).map(|mir_doc| {
}
}
-pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
- cdata: Cmd,
- id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> Option<ty::ImplOrTraitItem<'tcx>> {
+pub fn get_impl_or_trait_item<'a, 'tcx>(intr: Rc<IdentInterner>,
+ cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> Option<ty::ImplOrTraitItem<'tcx>> {
let item_doc = cdata.lookup_item(id);
let def_id = item_def_id(item_doc, cdata);
let predicates = doc_predicates(item_doc, tcx, cdata, tag_method_ty_generics);
let ity = tcx.lookup_item_type(def_id).ty;
let fty = match ity.sty {
- ty::TyFnDef(_, _, fty) => fty.clone(),
+ ty::TyFnDef(_, _, fty) => fty,
_ => bug!(
"the type {:?} of the method {:?} is not a function?",
ity, name)
Decodable::decode(&mut decoder).unwrap()
}
-pub fn get_provided_trait_methods<'tcx>(intr: Rc<IdentInterner>,
- cdata: Cmd,
- id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> Vec<Rc<ty::Method<'tcx>>> {
+pub fn get_provided_trait_methods<'a, 'tcx>(intr: Rc<IdentInterner>,
+ cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> Vec<Rc<ty::Method<'tcx>>> {
let item = cdata.lookup_item(id);
reader::tagged_docs(item, tag_item_trait_item).filter_map(|mth_id| {
}).collect()
}
-pub fn get_associated_consts<'tcx>(intr: Rc<IdentInterner>,
- cdata: Cmd,
- id: DefIndex,
- tcx: &TyCtxt<'tcx>)
- -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
+pub fn get_associated_consts<'a, 'tcx>(intr: Rc<IdentInterner>,
+ cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> Vec<Rc<ty::AssociatedConst<'tcx>>> {
let item = cdata.lookup_item(id);
[tag_item_trait_item, tag_item_impl_item].iter().flat_map(|&tag| {
}
}
-pub fn get_trait_of_item(cdata: Cmd, id: DefIndex, tcx: &TyCtxt)
- -> Option<DefId> {
+pub fn get_trait_of_item<'a, 'tcx>(cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> Option<DefId> {
let item_doc = cdata.lookup_item(id);
let parent_item_id = match item_parent_item(cdata, item_doc) {
None => return None,
}
}
-pub fn is_extern_item(cdata: Cmd, id: DefIndex, tcx: &TyCtxt) -> bool {
+pub fn is_extern_item<'a, 'tcx>(cdata: Cmd,
+ id: DefIndex,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> bool {
let item_doc = match cdata.get_item(id) {
Some(doc) => doc,
None => return false,
}
}
-fn doc_generics<'tcx>(base_doc: rbml::Doc,
- tcx: &TyCtxt<'tcx>,
- cdata: Cmd,
- tag: usize)
- -> ty::Generics<'tcx>
+fn doc_generics<'a, 'tcx>(base_doc: rbml::Doc,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cdata: Cmd,
+ tag: usize)
+ -> ty::Generics<'tcx>
{
let doc = reader::get_doc(base_doc, tag);
ty::Generics { types: types, regions: regions }
}
-fn doc_predicate<'tcx>(cdata: Cmd,
- doc: rbml::Doc,
- tcx: &TyCtxt<'tcx>)
- -> ty::Predicate<'tcx>
+fn doc_predicate<'a, 'tcx>(cdata: Cmd,
+ doc: rbml::Doc,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::Predicate<'tcx>
{
let predicate_pos = cdata.xref_index.lookup(
cdata.data(), reader::doc_as_u32(doc)).unwrap() as usize;
).parse_predicate()
}
-fn doc_predicates<'tcx>(base_doc: rbml::Doc,
- tcx: &TyCtxt<'tcx>,
- cdata: Cmd,
- tag: usize)
- -> ty::GenericPredicates<'tcx>
+fn doc_predicates<'a, 'tcx>(base_doc: rbml::Doc,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cdata: Cmd,
+ tag: usize)
+ -> ty::GenericPredicates<'tcx>
{
let doc = reader::get_doc(base_doc, tag);
ty::ClosureKind::decode(&mut decoder).unwrap()
}
-pub fn closure_ty<'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: &TyCtxt<'tcx>)
- -> ty::ClosureTy<'tcx> {
+pub fn closure_ty<'a, 'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::ClosureTy<'tcx> {
let closure_doc = cdata.lookup_item(closure_id);
let closure_ty_doc = reader::get_doc(closure_doc, tag_items_closure_ty);
TyDecoder::with_doc(tcx, cdata.cnum, closure_ty_doc, &mut |did| translate_def_id(cdata, did))
debug!("def_path(id={:?})", id);
hir_map::DefPath::make(cdata.cnum, id, |parent| def_key(cdata, parent))
}
+
+pub fn get_panic_strategy(data: &[u8]) -> PanicStrategy {
+ let crate_doc = rbml::Doc::new(data);
+ let strat_doc = reader::get_doc(crate_doc, tag_panic_strategy);
+ match reader::doc_as_u8(strat_doc) {
+ b'U' => PanicStrategy::Unwind,
+ b'A' => PanicStrategy::Abort,
+ b => panic!("unknown panic strategy in metadata: {}", b),
+ }
+}
use rustc::hir::def;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
use middle::dependency_format::Linkage;
-use middle::stability;
use rustc::ty::subst;
use rustc::traits::specialization_graph;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::hir::svh::Svh;
use rustc::mir::mir_map::MirMap;
-use rustc::session::config;
+use rustc::session::config::{self, PanicStrategy};
use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet};
use rustc_serialize::Encodable;
pub struct EncodeContext<'a, 'tcx: 'a> {
pub diag: &'a Handler,
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub reexports: &'a def::ExportMap,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub link_meta: &'a LinkMeta,
(did.krate as u64) << 32 | (did.index.as_usize() as u64)
}
-pub fn def_to_string(_tcx: &TyCtxt, did: DefId) -> String {
+pub fn def_to_string(_tcx: TyCtxt, did: DefId) -> String {
format!("{}:{}", did.krate, did.index.as_usize())
}
debug!("encode_enum_variant_info(did={:?})", did);
let repr_hints = ecx.tcx.lookup_repr_hints(did);
let repr_type = ecx.tcx.enum_repr_type(repr_hints.get(0));
- let mut disr_val = repr_type.initial_discriminant(&ecx.tcx);
+ let mut disr_val = repr_type.initial_discriminant(ecx.tcx);
let def = ecx.tcx.lookup_adt_def(did);
for variant in &def.variants {
let vid = variant.did;
encode_attributes(rbml_w, &attrs);
encode_repr_attrs(rbml_w, ecx, &attrs);
- let stab = stability::lookup_stability(ecx.tcx, vid);
- let depr = stability::lookup_deprecation(ecx.tcx, vid);
+ let stab = ecx.tcx.lookup_stability(vid);
+ let depr = ecx.tcx.lookup_deprecation(vid);
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_visibility(rbml_w, vis);
- let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(id));
- let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(id));
+ let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(id));
+ let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(id));
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_bounds_and_type_for_item(rbml_w, ecx, index, id);
encode_def_id_and_key(ecx, rbml_w, field.did);
- let stab = stability::lookup_stability(ecx.tcx, field.did);
- let depr = stability::lookup_deprecation(ecx.tcx, field.did);
+ let stab = ecx.tcx.lookup_stability(field.did);
+ let depr = ecx.tcx.lookup_deprecation(field.did);
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_symbol(ecx, rbml_w, ctor_id);
}
- let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(ctor_id));
- let depr= stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(ctor_id));
+ let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(ctor_id));
+ let depr= ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(ctor_id));
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_bounds_and_type_for_item(rbml_w, ecx, index,
ecx.local_id(associated_const.def_id));
- let stab = stability::lookup_stability(ecx.tcx, associated_const.def_id);
- let depr = stability::lookup_deprecation(ecx.tcx, associated_const.def_id);
+ let stab = ecx.tcx.lookup_stability(associated_const.def_id);
+ let depr = ecx.tcx.lookup_deprecation(associated_const.def_id);
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id));
encode_item_sort(rbml_w, 'r');
- let stab = stability::lookup_stability(ecx.tcx, m.def_id);
- let depr = stability::lookup_deprecation(ecx.tcx, m.def_id);
+ let stab = ecx.tcx.lookup_stability(m.def_id);
+ let depr = ecx.tcx.lookup_deprecation(m.def_id);
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id));
encode_item_sort(rbml_w, 't');
- let stab = stability::lookup_stability(ecx.tcx, associated_type.def_id);
- let depr = stability::lookup_deprecation(ecx.tcx, associated_type.def_id);
+ let stab = ecx.tcx.lookup_stability(associated_type.def_id);
+ let depr = ecx.tcx.lookup_deprecation(associated_type.def_id);
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
let vis = &item.vis;
let def_id = ecx.tcx.map.local_def_id(item.id);
- let stab = stability::lookup_stability(tcx, ecx.tcx.map.local_def_id(item.id));
- let depr = stability::lookup_deprecation(tcx, ecx.tcx.map.local_def_id(item.id));
+ let stab = tcx.lookup_stability(ecx.tcx.map.local_def_id(item.id));
+ let depr = tcx.lookup_deprecation(ecx.tcx.map.local_def_id(item.id));
match item.node {
hir::ItemStatic(_, m, _) => {
encode_parent_item(rbml_w, def_id);
- let stab = stability::lookup_stability(tcx, item_def_id.def_id());
- let depr = stability::lookup_deprecation(tcx, item_def_id.def_id());
+ let stab = tcx.lookup_stability(item_def_id.def_id());
+ let depr = tcx.lookup_deprecation(item_def_id.def_id());
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_symbol(ecx, rbml_w, nitem.id);
}
encode_attributes(rbml_w, &nitem.attrs);
- let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
- let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
+ let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id));
+ let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id));
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_method_argument_names(rbml_w, &fndecl);
}
encode_bounds_and_type_for_item(rbml_w, ecx, index, nitem.id);
encode_attributes(rbml_w, &nitem.attrs);
- let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
- let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
+ let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id));
+ let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id));
encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr);
encode_symbol(ecx, rbml_w, nitem.id);
struct ImplVisitor<'a, 'tcx:'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
impls: FnvHashMap<DefId, Vec<DefId>>
}
}
}
+fn encode_panic_strategy(rbml_w: &mut Encoder, ecx: &EncodeContext) {
+ match ecx.tcx.sess.opts.cg.panic {
+ PanicStrategy::Unwind => {
+ rbml_w.wr_tagged_u8(tag_panic_strategy, b'U');
+ }
+ PanicStrategy::Abort => {
+ rbml_w.wr_tagged_u8(tag_panic_strategy, b'A');
+ }
+ }
+}
+
// NB: Increment this as you change the metadata encoding version.
#[allow(non_upper_case_globals)]
pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ];
encode_hash(rbml_w, &ecx.link_meta.crate_hash);
encode_crate_disambiguator(rbml_w, &ecx.tcx.sess.crate_disambiguator.get().as_str());
encode_dylib_dependency_formats(rbml_w, &ecx);
+ encode_panic_strategy(rbml_w, &ecx);
let mut i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap();
encode_attributes(rbml_w, &krate.attrs);
}
// Get the encoded string for a type
-pub fn encoded_ty<'tcx>(tcx: &TyCtxt<'tcx>,
- t: Ty<'tcx>,
- def_id_to_string: fn(&TyCtxt<'tcx>, DefId) -> String)
- -> Vec<u8> {
+pub fn encoded_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ t: Ty<'tcx>,
+ def_id_to_string: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String)
+ -> Vec<u8> {
let mut wr = Cursor::new(Vec::new());
tyencode::enc_ty(&mut wr, &tyencode::ctxt {
diag: tcx.sess.diagnostic(),
impl<'a, 'tcx: 'a> tls::EncodingContext<'tcx> for encoder::EncodeContext<'a, 'tcx> {
- fn tcx<'s>(&'s self) -> &'s TyCtxt<'tcx> {
- &self.tcx
+ fn tcx<'s>(&'s self) -> TyCtxt<'s, 'tcx, 'tcx> {
+ self.tcx
}
fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: ty::Ty<'tcx>) {
pub struct DecodingContext<'a, 'tcx: 'a> {
pub crate_metadata: Cmd<'a>,
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx: 'a> tls::DecodingContext<'tcx> for DecodingContext<'a, 'tcx> {
- fn tcx<'s>(&'s self) -> &'s TyCtxt<'tcx> {
- &self.tcx
+ fn tcx<'s>(&'s self) -> TyCtxt<'s, 'tcx, 'tcx> {
+ self.tcx
}
fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx> {
data: &'a [u8],
krate: ast::CrateNum,
pos: usize,
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
conv_def_id: DefIdConvert<'a>,
}
impl<'a,'tcx> TyDecoder<'a,'tcx> {
- pub fn with_doc(tcx: &'a TyCtxt<'tcx>,
+ pub fn with_doc(tcx: TyCtxt<'a, 'tcx, 'tcx>,
crate_num: ast::CrateNum,
doc: rbml::Doc<'a>,
conv: DefIdConvert<'a>)
pub fn new(data: &'a [u8],
crate_num: ast::CrateNum,
pos: usize,
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
conv: DefIdConvert<'a>)
-> TyDecoder<'a, 'tcx> {
TyDecoder {
}
}
- pub fn parse_bare_fn_ty(&mut self) -> ty::BareFnTy<'tcx> {
+ pub fn parse_bare_fn_ty(&mut self) -> &'tcx ty::BareFnTy<'tcx> {
let unsafety = parse_unsafety(self.next());
let abi = self.parse_abi_set();
let sig = self.parse_sig();
- ty::BareFnTy {
+ self.tcx.mk_bare_fn(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
sig: sig
- }
+ })
}
fn parse_sig(&mut self) -> ty::PolyFnSig<'tcx> {
pub struct ctxt<'a, 'tcx: 'a> {
pub diag: &'a Handler,
// Def -> str Callback:
- pub ds: fn(&TyCtxt<'tcx>, DefId) -> String,
+ pub ds: for<'b> fn(TyCtxt<'b, 'tcx, 'tcx>, DefId) -> String,
// The type context.
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub abbrevs: &'a abbrev_map<'tcx>
}
enc_existential_bounds(w, cx, bounds);
write!(w, "]");
}
- ty::TyTuple(ref ts) => {
+ ty::TyTuple(ts) => {
write!(w, "T[");
for t in ts { enc_ty(w, cx, *t); }
write!(w, "]");
enc_substs(w, cx, substs);
write!(w, "]");
}
- ty::TyClosure(def, ref substs) => {
+ ty::TyClosure(def, substs) => {
write!(w, "k[{}|", (cx.ds)(cx.tcx, def));
- enc_substs(w, cx, &substs.func_substs);
- for ty in &substs.upvar_tys {
+ enc_substs(w, cx, substs.func_substs);
+ for ty in substs.upvar_tys {
enc_ty(w, cx, ty);
}
write!(w, ".");
use rustc::mir::repr::*;
use rustc::hir;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn ast_block(&mut self,
destination: &Lvalue<'tcx>,
// FIXME(#32959): temporary measure for the issue
use hair::*;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr`, yielding a compile-time constant. Assumes that
/// `expr` is a valid compile-time constant!
pub fn as_constant<M>(&mut self, expr: M) -> Constant<'tcx>
use hair::*;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr`, yielding an lvalue that we can move from etc.
pub fn as_lvalue<M>(&mut self,
block: BasicBlock,
use hair::*;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr` into a value that can be used as an operand.
/// If `expr` is an lvalue like `x`, this will introduce a
/// temporary `tmp = x`, so that we capture the value of `x` at
use hair::*;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr`, yielding an rvalue.
pub fn as_rvalue<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<Rvalue<'tcx>>
where M: Mirror<'tcx, Output = Expr<'tcx>>
use hair::*;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr` into a fresh temporary. This is used when building
/// up rvalues so as to freeze the value that will be consumed.
pub fn as_temp<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<Lvalue<'tcx>>
use rustc::ty;
use rustc::mir::repr::*;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Compile `expr`, storing the result into `destination`, which
/// is assumed to be uninitialized.
pub fn into_expr(&mut self,
use rustc::mir::repr::*;
use syntax::codemap::Span;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> {
let this = self;
use rustc::mir::repr::*;
pub trait EvalInto<'tcx> {
- fn eval_into<'a>(self,
- builder: &mut Builder<'a, 'tcx>,
- destination: &Lvalue<'tcx>,
- block: BasicBlock)
- -> BlockAnd<()>;
+ fn eval_into<'a, 'gcx>(self,
+ builder: &mut Builder<'a, 'gcx, 'tcx>,
+ destination: &Lvalue<'tcx>,
+ block: BasicBlock)
+ -> BlockAnd<()>;
}
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn into<E>(&mut self,
destination: &Lvalue<'tcx>,
block: BasicBlock,
}
impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> {
- fn eval_into<'a>(self,
- builder: &mut Builder<'a, 'tcx>,
- destination: &Lvalue<'tcx>,
- block: BasicBlock)
- -> BlockAnd<()> {
+ fn eval_into<'a, 'gcx>(self,
+ builder: &mut Builder<'a, 'gcx, 'tcx>,
+ destination: &Lvalue<'tcx>,
+ block: BasicBlock)
+ -> BlockAnd<()> {
let expr = builder.hir.mirror(self);
builder.into_expr(destination, block, expr)
}
}
impl<'tcx> EvalInto<'tcx> for Expr<'tcx> {
- fn eval_into<'a>(self,
- builder: &mut Builder<'a, 'tcx>,
- destination: &Lvalue<'tcx>,
- block: BasicBlock)
- -> BlockAnd<()> {
+ fn eval_into<'a, 'gcx>(self,
+ builder: &mut Builder<'a, 'gcx, 'tcx>,
+ destination: &Lvalue<'tcx>,
+ block: BasicBlock)
+ -> BlockAnd<()> {
builder.into_expr(destination, block, self)
}
}
mod test;
mod util;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn match_expr(&mut self,
destination: &Lvalue<'tcx>,
span: Span,
///////////////////////////////////////////////////////////////////////////
// Main matching algorithm
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// The main match algorithm. It begins with a set of candidates
/// `candidates` and has the job of generating code to determine
/// which of these candidates, if any, is the correct one. The
use std::mem;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn simplify_candidate<'pat>(&mut self,
mut block: BasicBlock,
candidate: &mut Candidate<'pat, 'tcx>)
use rustc::mir::repr::*;
use syntax::codemap::Span;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Identifies what test is needed to decide if `match_pair` is applicable.
///
/// It is a bug to call this with a simplifyable pattern.
use rustc::mir::repr::*;
use std::u32;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn field_match_pairs<'pat>(&mut self,
lvalue: Lvalue<'tcx>,
subpatterns: &'pat [FieldPattern<'tcx>])
use std::u32;
use syntax::codemap::Span;
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Add a new temporary value of type `ty` storing the result of
/// evaluating `expr`.
///
use syntax::codemap::Span;
use syntax::parse::token::keywords;
-pub struct Builder<'a, 'tcx: 'a> {
- hir: Cx<'a, 'tcx>,
+pub struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ hir: Cx<'a, 'gcx, 'tcx>,
cfg: CFG<'tcx>,
fn_span: Span,
///////////////////////////////////////////////////////////////////////////
/// the main entry point for building MIR for a function
-pub fn construct_fn<'a, 'tcx, A>(hir: Cx<'a,'tcx>,
- fn_id: ast::NodeId,
- arguments: A,
- return_ty: ty::FnOutput<'tcx>,
- ast_block: &'tcx hir::Block)
- -> (Mir<'tcx>, ScopeAuxiliaryVec)
- where A: Iterator<Item=(Ty<'tcx>, Option<&'tcx hir::Pat>)>
+pub fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>,
+ fn_id: ast::NodeId,
+ arguments: A,
+ return_ty: ty::FnOutput<'gcx>,
+ ast_block: &'gcx hir::Block)
+ -> (Mir<'tcx>, ScopeAuxiliaryVec)
+ where A: Iterator<Item=(Ty<'gcx>, Option<&'gcx hir::Pat>)>
{
let tcx = hir.tcx();
let span = tcx.map.span(fn_id);
builder.finish(upvar_decls, arg_decls, return_ty)
}
-pub fn construct_const<'a, 'tcx>(hir: Cx<'a,'tcx>,
- item_id: ast::NodeId,
- ast_expr: &'tcx hir::Expr)
- -> (Mir<'tcx>, ScopeAuxiliaryVec) {
+pub fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>,
+ item_id: ast::NodeId,
+ ast_expr: &'tcx hir::Expr)
+ -> (Mir<'tcx>, ScopeAuxiliaryVec) {
let tcx = hir.tcx();
let span = tcx.map.span(item_id);
let mut builder = Builder::new(hir, span);
builder.finish(vec![], vec![], ty::FnConverging(ty))
}
-impl<'a,'tcx> Builder<'a,'tcx> {
- fn new(hir: Cx<'a, 'tcx>, span: Span) -> Builder<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
+ fn new(hir: Cx<'a, 'gcx, 'tcx>, span: Span) -> Builder<'a, 'gcx, 'tcx> {
let mut builder = Builder {
hir: hir,
cfg: CFG { basic_blocks: vec![] },
return_ty: ty::FnOutput<'tcx>,
arguments: A,
argument_scope_id: ScopeId,
- ast_block: &'tcx hir::Block)
+ ast_block: &'gcx hir::Block)
-> BlockAnd<Vec<ArgDecl<'tcx>>>
- where A: Iterator<Item=(Ty<'tcx>, Option<&'tcx hir::Pat>)>
+ where A: Iterator<Item=(Ty<'gcx>, Option<&'gcx hir::Pat>)>
{
// to start, translate the argument patterns and collect the argument types.
let arg_decls = arguments.enumerate().map(|(index, (ty, pattern))| {
}
}
-impl<'a,'tcx> Builder<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// Adding and removing scopes
// ==========================
/// Start a loop scope, which tracks where `continue` and `break`
break_block: BasicBlock,
f: F)
-> bool
- where F: FnOnce(&mut Builder<'a, 'tcx>)
+ where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>)
{
let extent = self.extent_of_innermost_scope();
let loop_scope = LoopScope {
/// Convenience wrapper that pushes a scope and then executes `f`
/// to build its contents, popping the scope afterwards.
pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
- where F: FnOnce(&mut Builder<'a, 'tcx>, ScopeId) -> BlockAnd<R>
+ where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>, ScopeId) -> BlockAnd<R>
{
debug!("in_scope(extent={:?}, block={:?})", extent, block);
let id = self.push_scope(extent, block);
block.unit()
}
-fn build_diverge_scope<'tcx>(tcx: &TyCtxt<'tcx>,
- cfg: &mut CFG<'tcx>,
- unit_temp: &Lvalue<'tcx>,
- scope: &mut Scope<'tcx>,
- mut target: BasicBlock)
- -> BasicBlock
+fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ cfg: &mut CFG<'tcx>,
+ unit_temp: &Lvalue<'tcx>,
+ scope: &mut Scope<'tcx>,
+ mut target: BasicBlock)
+ -> BasicBlock
{
// Build up the drops in **reverse** order. The end result will
// look like:
target
}
-fn build_free<'tcx>(tcx: &TyCtxt<'tcx>,
- unit_temp: &Lvalue<'tcx>,
- data: &FreeData<'tcx>,
- target: BasicBlock)
- -> TerminatorKind<'tcx> {
+fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ unit_temp: &Lvalue<'tcx>,
+ data: &FreeData<'tcx>,
+ target: BasicBlock)
+ -> TerminatorKind<'tcx> {
let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
.unwrap_or_else(|e| tcx.sess.fatal(&e));
let substs = tcx.mk_substs(Substs::new(
use dot;
use rustc::mir::repr::*;
-use rustc::ty;
+use rustc::ty::{self, TyCtxt};
use std::fmt::Debug;
use std::io::{self, Write};
use syntax::ast::NodeId;
/// Write a graphviz DOT graph of a list of MIRs.
-pub fn write_mir_graphviz<'a, 't, W, I>(tcx: &ty::TyCtxt<'t>, iter: I, w: &mut W) -> io::Result<()>
+pub fn write_mir_graphviz<'a, 'b, 'tcx, W, I>(tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ iter: I, w: &mut W)
+ -> io::Result<()>
where W: Write, I: Iterator<Item=(&'a NodeId, &'a Mir<'a>)> {
for (&nodeid, mir) in iter {
writeln!(w, "digraph Mir_{} {{", nodeid)?;
/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
/// all the variables and temporaries.
-fn write_graph_label<W: Write>(tcx: &ty::TyCtxt, nid: NodeId, mir: &Mir, w: &mut W)
--> io::Result<()> {
+fn write_graph_label<'a, 'tcx, W: Write>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ nid: NodeId,
+ mir: &Mir,
+ w: &mut W)
+ -> io::Result<()> {
write!(w, " label=<fn {}(", dot::escape_html(&tcx.node_path_str(nid)))?;
// fn argument types.
impl<'tcx> Mirror<'tcx> for &'tcx hir::Block {
type Output = Block<'tcx>;
- fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Block<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Block<'tcx> {
// We have to eagerly translate the "spine" of the statements
// in order to get the lexical scoping correctly.
let stmts = mirror_stmts(cx, self.id, &*self.stmts);
}
}
-fn mirror_stmts<'a,'tcx:'a>(cx: &mut Cx<'a,'tcx>,
- block_id: ast::NodeId,
- stmts: &'tcx [hir::Stmt])
- -> Vec<StmtRef<'tcx>>
+fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ block_id: ast::NodeId,
+ stmts: &'tcx [hir::Stmt])
+ -> Vec<StmtRef<'tcx>>
{
let mut result = vec![];
for (index, stmt) in stmts.iter().enumerate() {
return result;
}
-pub fn to_expr_ref<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, block: &'tcx hir::Block) -> ExprRef<'tcx> {
+pub fn to_expr_ref<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ block: &'tcx hir::Block)
+ -> ExprRef<'tcx> {
let block_ty = cx.tcx.node_id_to_type(block.id);
let temp_lifetime = cx.tcx.region_maps.temporary_scope(block.id);
let expr = Expr {
impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
type Output = Expr<'tcx>;
- fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Expr<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> {
let temp_lifetime = cx.tcx.region_maps.temporary_scope(self.id);
let expr_extent = cx.tcx.region_maps.node_extent(self.id);
}
}
-fn make_mirror_unadjusted<'a, 'tcx>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> Expr<'tcx> {
+fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr)
+ -> Expr<'tcx> {
let expr_ty = cx.tcx.expr_ty(expr);
let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id);
})
} else { None };
if let Some((adt_def, index)) = adt_data {
- let substs = cx.tcx.mk_substs(cx.tcx.node_id_item_substs(fun.id).substs);
+ let substs = cx.tcx.node_id_item_substs(fun.id).substs;
let field_refs = args.iter().enumerate().map(|(idx, e)| FieldExprRef {
name: Field::new(idx),
expr: e.to_ref()
hir::ExprClosure(..) => {
let closure_ty = cx.tcx.expr_ty(expr);
let (def_id, substs) = match closure_ty.sty {
- ty::TyClosure(def_id, ref substs) => (def_id, substs),
+ ty::TyClosure(def_id, substs) => (def_id, substs),
_ => {
span_bug!(expr.span,
"closure expr w/o closure type: {:?}",
});
ExprKind::Closure {
closure_id: def_id,
- substs: &substs,
+ substs: substs,
upvars: upvars,
}
}
count: TypedConstVal {
ty: cx.tcx.expr_ty(c),
span: c.span,
- value: match const_eval::eval_const_expr(cx.tcx, c) {
+ value: match const_eval::eval_const_expr(cx.tcx.global_tcx(), c) {
ConstVal::Integral(ConstInt::Usize(u)) => u,
other => bug!("constant evaluation of repeat count yielded {:?}", other),
},
}
}
-fn method_callee<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
- expr: &hir::Expr,
- method_call: ty::MethodCall)
- -> Expr<'tcx> {
+fn method_callee<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &hir::Expr,
+ method_call: ty::MethodCall)
+ -> Expr<'tcx> {
let tables = cx.tcx.tables.borrow();
let callee = &tables.method_map[&method_call];
let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id);
}
}
-fn convert_arm<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, arm: &'tcx hir::Arm) -> Arm<'tcx> {
+fn convert_arm<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ arm: &'tcx hir::Arm) -> Arm<'tcx> {
let mut map;
let opt_map = if arm.pats.len() == 1 {
None
}
}
-fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> ExprKind<'tcx> {
- let substs = cx.tcx.mk_substs(cx.tcx.node_id_item_substs(expr.id).substs);
+fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr)
+ -> ExprKind<'tcx> {
+ let substs = cx.tcx.node_id_item_substs(expr.id).substs;
// Otherwise there may be def_map borrow conflicts
let def = cx.tcx.def_map.borrow()[&expr.id].full_def();
let def_id = match def {
Def::Const(def_id) |
Def::AssociatedConst(def_id) => {
let substs = Some(cx.tcx.node_id_item_substs(expr.id).substs);
- if let Some((e, _)) = const_eval::lookup_const_by_id(cx.tcx, def_id, substs) {
+ let tcx = cx.tcx.global_tcx();
+ if let Some((e, _)) = const_eval::lookup_const_by_id(tcx, def_id, substs) {
// FIXME ConstVal can't be yet used with adjustments, as they would be lost.
if !cx.tcx.tables.borrow().adjustments.contains_key(&e.id) {
if let Some(v) = cx.try_const_eval_literal(e) {
}
}
-fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- def: Def)
- -> ExprKind<'tcx> {
+fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr,
+ def: Def)
+ -> ExprKind<'tcx> {
let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id);
match def {
ByRef,
}
-fn overloaded_operator<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- method_call: ty::MethodCall,
- pass_args: PassArgs,
- receiver: ExprRef<'tcx>,
- args: Vec<&'tcx P<hir::Expr>>)
- -> ExprKind<'tcx> {
+fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr,
+ method_call: ty::MethodCall,
+ pass_args: PassArgs,
+ receiver: ExprRef<'tcx>,
+ args: Vec<&'tcx P<hir::Expr>>)
+ -> ExprKind<'tcx> {
// the receiver has all the adjustments that are needed, so we can
// just push a reference to it
let mut argrefs = vec![receiver];
}
}
-fn overloaded_lvalue<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- method_call: ty::MethodCall,
- pass_args: PassArgs,
- receiver: ExprRef<'tcx>,
- args: Vec<&'tcx P<hir::Expr>>)
- -> ExprKind<'tcx> {
+fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr,
+ method_call: ty::MethodCall,
+ pass_args: PassArgs,
+ receiver: ExprRef<'tcx>,
+ args: Vec<&'tcx P<hir::Expr>>)
+ -> ExprKind<'tcx> {
// For an overloaded *x or x[y] expression of type T, the method
// call returns an &T and we must add the deref so that the types
// line up (this is because `*x` and `x[y]` represent lvalues):
ExprKind::Deref { arg: ref_expr.to_ref() }
}
-fn capture_freevar<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>,
- closure_expr: &'tcx hir::Expr,
- freevar: &hir::Freevar,
- freevar_ty: Ty<'tcx>)
- -> ExprRef<'tcx> {
+fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ closure_expr: &'tcx hir::Expr,
+ freevar: &hir::Freevar,
+ freevar_ty: Ty<'tcx>)
+ -> ExprRef<'tcx> {
let id_var = freevar.def.var_id();
let upvar_id = ty::UpvarId {
var_id: id_var,
}
}
-fn loop_label<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> CodeExtent {
+fn loop_label<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr) -> CodeExtent {
match cx.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) {
Some(Def::Label(loop_id)) => cx.tcx.region_maps.node_extent(loop_id),
d => {
use rustc_const_math::{ConstInt, ConstUsize};
#[derive(Copy, Clone)]
-pub struct Cx<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct Cx<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
constness: hir::Constness
}
-impl<'a,'tcx> Cx<'a,'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>,
+impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
constness: hir::Constness)
- -> Cx<'a, 'tcx> {
+ -> Cx<'a, 'gcx, 'tcx> {
Cx {
tcx: infcx.tcx,
infcx: infcx,
}
}
-impl<'a,'tcx:'a> Cx<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> {
/// Normalizes `ast` into the appropriate `mirror` type.
pub fn mirror<M: Mirror<'tcx>>(&mut self, ast: M) -> M::Output {
ast.make_mirror(self)
}
pub fn const_eval_literal(&mut self, e: &hir::Expr) -> Literal<'tcx> {
- Literal::Value { value: const_eval::eval_const_expr(self.tcx, e) }
+ Literal::Value {
+ value: const_eval::eval_const_expr(self.tcx.global_tcx(), e)
+ }
}
pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option<Literal<'tcx>> {
let hint = const_eval::EvalHint::ExprTypeChecked;
- const_eval::eval_const_expr_partial(self.tcx, e, hint, None).ok().and_then(|v| {
+ let tcx = self.tcx.global_tcx();
+ const_eval::eval_const_expr_partial(tcx, e, hint, None).ok().and_then(|v| {
match v {
// All of these contain local IDs, unsuitable for storing in MIR.
ConstVal::Struct(_) | ConstVal::Tuple(_) |
bug!("found no method `{}` in `{:?}`", method_name, trait_def_id);
}
- pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize {
+ pub fn num_variants(&mut self, adt_def: ty::AdtDef) -> usize {
adt_def.variants.len()
}
- pub fn all_fields(&mut self, adt_def: ty::AdtDef<'tcx>, variant_index: usize) -> Vec<Field> {
+ pub fn all_fields(&mut self, adt_def: ty::AdtDef, variant_index: usize) -> Vec<Field> {
(0..adt_def.variants[variant_index].fields.len())
.map(Field::new)
.collect()
}
pub fn needs_drop(&mut self, ty: Ty<'tcx>) -> bool {
+ let ty = self.tcx.lift_to_global(&ty).unwrap_or_else(|| {
+ bug!("MIR: Cx::needs_drop({}) got \
+ type with inference types/regions", ty);
+ });
self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment)
}
- pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.tcx
}
}
/// _ => { ... }
/// }
/// ```
-struct PatCx<'patcx, 'cx: 'patcx, 'tcx: 'cx> {
- cx: &'patcx mut Cx<'cx, 'tcx>,
+struct PatCx<'patcx, 'cx: 'patcx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
+ cx: &'patcx mut Cx<'cx, 'gcx, 'tcx>,
binding_map: Option<&'patcx FnvHashMap<ast::Name, ast::NodeId>>,
}
-impl<'cx, 'tcx> Cx<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> Cx<'cx, 'gcx, 'tcx> {
pub fn irrefutable_pat(&mut self, pat: &hir::Pat) -> Pattern<'tcx> {
PatCx::new(self, None).to_pattern(pat)
}
}
}
-impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
- fn new(cx: &'patcx mut Cx<'cx, 'tcx>,
+impl<'patcx, 'cx, 'gcx, 'tcx> PatCx<'patcx, 'cx, 'gcx, 'tcx> {
+ fn new(cx: &'patcx mut Cx<'cx, 'gcx, 'tcx>,
binding_map: Option<&'patcx FnvHashMap<ast::Name, ast::NodeId>>)
- -> PatCx<'patcx, 'cx, 'tcx> {
+ -> PatCx<'patcx, 'cx, 'gcx, 'tcx> {
PatCx {
cx: cx,
binding_map: binding_map,
PatKind::Wild => PatternKind::Wild,
PatKind::Lit(ref value) => {
- let value = const_eval::eval_const_expr(self.cx.tcx, value);
+ let value = const_eval::eval_const_expr(self.cx.tcx.global_tcx(), value);
PatternKind::Constant { value: value }
}
PatKind::Range(ref lo, ref hi) => {
- let lo = const_eval::eval_const_expr(self.cx.tcx, lo);
+ let lo = const_eval::eval_const_expr(self.cx.tcx.global_tcx(), lo);
let lo = Literal::Value { value: lo };
- let hi = const_eval::eval_const_expr(self.cx.tcx, hi);
+ let hi = const_eval::eval_const_expr(self.cx.tcx.global_tcx(), hi);
let hi = Literal::Value { value: hi };
PatternKind::Range { lo: lo, hi: hi }
},
let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
match def {
Def::Const(def_id) | Def::AssociatedConst(def_id) => {
+ let tcx = self.cx.tcx.global_tcx();
let substs = Some(self.cx.tcx.node_id_item_substs(pat.id).substs);
- match const_eval::lookup_const_by_id(self.cx.tcx, def_id, substs) {
+ match const_eval::lookup_const_by_id(tcx, def_id, substs) {
Some((const_expr, _const_ty)) => {
- match const_eval::const_expr_to_pat(self.cx.tcx,
+ match const_eval::const_expr_to_pat(tcx,
const_expr,
pat.id,
pat.span) {
},
Closure {
closure_id: DefId,
- substs: &'tcx ClosureSubsts<'tcx>,
+ substs: ClosureSubsts<'tcx>,
upvars: Vec<ExprRef<'tcx>>,
},
Literal {
pub trait Mirror<'tcx> {
type Output;
- fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Self::Output;
+ fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Self::Output;
}
impl<'tcx> Mirror<'tcx> for Expr<'tcx> {
type Output = Expr<'tcx>;
- fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Expr<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> {
self
}
}
impl<'tcx> Mirror<'tcx> for ExprRef<'tcx> {
type Output = Expr<'tcx>;
- fn make_mirror<'a>(self, hir: &mut Cx<'a, 'tcx>) -> Expr<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, hir: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> {
match self {
ExprRef::Hair(h) => h.make_mirror(hir),
ExprRef::Mirror(m) => *m,
impl<'tcx> Mirror<'tcx> for Stmt<'tcx> {
type Output = Stmt<'tcx>;
- fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Stmt<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Stmt<'tcx> {
self
}
}
impl<'tcx> Mirror<'tcx> for StmtRef<'tcx> {
type Output = Stmt<'tcx>;
- fn make_mirror<'a>(self, _: &mut Cx<'a,'tcx>) -> Stmt<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Stmt<'tcx> {
match self {
StmtRef::Mirror(m) => *m,
}
impl<'tcx> Mirror<'tcx> for Block<'tcx> {
type Output = Block<'tcx>;
- fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Block<'tcx> {
+ fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Block<'tcx> {
self
}
}
use rustc::dep_graph::DepNode;
use rustc::mir::repr::Mir;
use rustc::mir::transform::MirSource;
+use rustc::mir::visit::MutVisitor;
use pretty;
use hair::cx::Cx;
use rustc::mir::mir_map::MirMap;
-use rustc::infer;
+use rustc::infer::InferCtxtBuilder;
use rustc::traits::ProjectionMode;
use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::subst::Substs;
use rustc::util::nodemap::NodeMap;
use rustc::hir;
use rustc::hir::intravisit::{self, FnKind, Visitor};
use syntax::ast;
use syntax::codemap::Span;
-pub fn build_mir_for_crate<'tcx>(tcx: &TyCtxt<'tcx>) -> MirMap<'tcx> {
+use std::mem;
+
+pub fn build_mir_for_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MirMap<'tcx> {
let mut map = MirMap {
map: NodeMap(),
};
map
}
+/// A pass to lift all the types and substitutions in a Mir
+/// to the global tcx. Sadly, we don't have a "folder" that
+/// can change 'tcx so we have to transmute afterwards.
+struct GlobalizeMir<'a, 'gcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'gcx>,
+ span: Span
+}
+
+impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> {
+ fn visit_ty(&mut self, ty: &mut Ty<'tcx>) {
+ if let Some(lifted) = self.tcx.lift(ty) {
+ *ty = lifted;
+ } else {
+ span_bug!(self.span,
+ "found type `{:?}` with inference types/regions in MIR",
+ ty);
+ }
+ }
+
+ fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>) {
+ if let Some(lifted) = self.tcx.lift(substs) {
+ *substs = lifted;
+ } else {
+ span_bug!(self.span,
+ "found substs `{:?}` with inference types/regions in MIR",
+ substs);
+ }
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
struct BuildMir<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
map: &'a mut MirMap<'tcx>,
}
-impl<'a, 'tcx> BuildMir<'a, 'tcx> {
- fn build<F>(&mut self, src: MirSource, f: F)
- where F: for<'b> FnOnce(Cx<'b, 'tcx>) -> (Mir<'tcx>, build::ScopeAuxiliaryVec)
+/// Helper type of a temporary returned by BuildMir::cx(...).
+/// Necessary because we can't write the following bound:
+/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Cx<'b, 'gcx, 'tcx>).
+struct CxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ src: MirSource,
+ infcx: InferCtxtBuilder<'a, 'gcx, 'tcx>,
+ map: &'a mut MirMap<'gcx>,
+}
+
+impl<'a, 'gcx, 'tcx> BuildMir<'a, 'gcx> {
+ fn cx<'b>(&'b mut self, src: MirSource) -> CxBuilder<'b, 'gcx, 'tcx> {
+ let param_env = ty::ParameterEnvironment::for_item(self.tcx, src.item_id());
+ CxBuilder {
+ src: src,
+ infcx: self.tcx.infer_ctxt(None, Some(param_env), ProjectionMode::AnyFinal),
+ map: self.map
+ }
+ }
+}
+
+impl<'a, 'gcx, 'tcx> CxBuilder<'a, 'gcx, 'tcx> {
+ fn build<F>(&'tcx mut self, f: F)
+ where F: for<'b> FnOnce(Cx<'b, 'gcx, 'tcx>) -> (Mir<'tcx>, build::ScopeAuxiliaryVec)
{
- let constness = match src {
- MirSource::Const(_) |
- MirSource::Static(..) => hir::Constness::Const,
- MirSource::Fn(id) => {
- let fn_like = FnLikeNode::from_node(self.tcx.map.get(id));
- match fn_like.map(|f| f.kind()) {
- Some(FnKind::ItemFn(_, _, _, c, _, _, _)) => c,
- Some(FnKind::Method(_, m, _, _)) => m.constness,
- _ => hir::Constness::NotConst
+ let src = self.src;
+ let mir = self.infcx.enter(|infcx| {
+ let constness = match src {
+ MirSource::Const(_) |
+ MirSource::Static(..) => hir::Constness::Const,
+ MirSource::Fn(id) => {
+ let fn_like = FnLikeNode::from_node(infcx.tcx.map.get(id));
+ match fn_like.map(|f| f.kind()) {
+ Some(FnKind::ItemFn(_, _, _, c, _, _, _)) => c,
+ Some(FnKind::Method(_, m, _, _)) => m.constness,
+ _ => hir::Constness::NotConst
+ }
}
- }
- MirSource::Promoted(..) => bug!()
- };
+ MirSource::Promoted(..) => bug!()
+ };
+ let (mut mir, scope_auxiliary) = f(Cx::new(&infcx, constness));
- let param_env = ty::ParameterEnvironment::for_item(self.tcx, src.item_id());
- let infcx = infer::new_infer_ctxt(self.tcx,
- &self.tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
+ // Convert the Mir to global types.
+ let mut globalizer = GlobalizeMir {
+ tcx: infcx.tcx.global_tcx(),
+ span: mir.span
+ };
+ globalizer.visit_mir(&mut mir);
+ let mir = unsafe {
+ mem::transmute::<Mir, Mir<'gcx>>(mir)
+ };
- let (mir, scope_auxiliary) = f(Cx::new(&infcx, constness));
+ pretty::dump_mir(infcx.tcx.global_tcx(), "mir_map", &0,
+ src, &mir, Some(&scope_auxiliary));
- pretty::dump_mir(self.tcx, "mir_map", &0, src, &mir, Some(&scope_auxiliary));
+ mir
+ });
assert!(self.map.map.insert(src.item_id(), mir).is_none())
}
+}
- fn build_const_integer(&mut self, expr: &'tcx hir::Expr) {
+impl<'a, 'gcx> BuildMir<'a, 'gcx> {
+ fn build_const_integer(&mut self, expr: &'gcx hir::Expr) {
// FIXME(eddyb) Closures should have separate
// function definition IDs and expression IDs.
// Type-checking should not let closures get
if let hir::ExprClosure(..) = expr.node {
return;
}
- self.build(MirSource::Const(expr.id), |cx| {
+ self.cx(MirSource::Const(expr.id)).build(|cx| {
build::construct_const(cx, expr.id, expr)
});
}
fn visit_item(&mut self, item: &'tcx hir::Item) {
match item.node {
hir::ItemConst(_, ref expr) => {
- self.build(MirSource::Const(item.id), |cx| {
+ self.cx(MirSource::Const(item.id)).build(|cx| {
build::construct_const(cx, item.id, expr)
});
}
hir::ItemStatic(_, m, ref expr) => {
- self.build(MirSource::Static(item.id, m), |cx| {
+ self.cx(MirSource::Static(item.id, m)).build(|cx| {
build::construct_const(cx, item.id, expr)
});
}
// Trait associated const defaults.
fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) {
if let hir::ConstTraitItem(_, Some(ref expr)) = item.node {
- self.build(MirSource::Const(item.id), |cx| {
+ self.cx(MirSource::Const(item.id)).build(|cx| {
build::construct_const(cx, item.id, expr)
});
}
// Impl associated const.
fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) {
if let hir::ImplItemKind::Const(_, ref expr) = item.node {
- self.build(MirSource::Const(item.id), |cx| {
+ self.cx(MirSource::Const(item.id)).build(|cx| {
build::construct_const(cx, item.id, expr)
});
}
};
let implicit_argument = if let FnKind::Closure(..) = fk {
- Some((closure_self_ty(&self.tcx, id, body.id), None))
+ Some((closure_self_ty(self.tcx, id, body.id), None))
} else {
None
};
(fn_sig.inputs[index], Some(&*arg.pat))
});
- self.build(MirSource::Fn(id), |cx| {
- let arguments = implicit_argument.into_iter().chain(explicit_arguments);
+ let arguments = implicit_argument.into_iter().chain(explicit_arguments);
+ self.cx(MirSource::Fn(id)).build(|cx| {
build::construct_fn(cx, id, arguments, fn_sig.output, body)
});
}
}
-fn closure_self_ty<'a, 'tcx>(tcx: &TyCtxt<'tcx>,
+fn closure_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
closure_expr_id: ast::NodeId,
body_id: ast::NodeId)
-> Ty<'tcx> {
/// - `substring1&substring2,...` -- `&`-separated list of substrings
/// that can appear in the pass-name or the `item_path_str` for the given
/// node-id. If any one of the substrings match, the data is dumped out.
-pub fn dump_mir<'a, 'tcx>(tcx: &TyCtxt<'tcx>,
+pub fn dump_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pass_name: &str,
disambiguator: &Display,
src: MirSource,
}
/// Write out a human-readable textual representation for the given MIR.
-pub fn write_mir_pretty<'a, 'tcx, I>(tcx: &TyCtxt<'tcx>,
- iter: I,
- w: &mut Write)
- -> io::Result<()>
+pub fn write_mir_pretty<'a, 'b, 'tcx, I>(tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ iter: I,
+ w: &mut Write)
+ -> io::Result<()>
where I: Iterator<Item=(&'a NodeId, &'a Mir<'tcx>)>, 'tcx: 'a
{
for (&id, mir) in iter {
ExitScope(ScopeId),
}
-pub fn write_mir_fn<'tcx>(tcx: &TyCtxt<'tcx>,
- src: MirSource,
- mir: &Mir<'tcx>,
- w: &mut Write,
- auxiliary: Option<&ScopeAuxiliaryVec>)
- -> io::Result<()> {
+pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ src: MirSource,
+ mir: &Mir<'tcx>,
+ w: &mut Write,
+ auxiliary: Option<&ScopeAuxiliaryVec>)
+ -> io::Result<()> {
// compute scope/entry exit annotations
let mut annotations = FnvHashMap();
if let Some(auxiliary) = auxiliary {
}
/// Write out a human-readable textual representation for the given basic block.
-fn write_basic_block(tcx: &TyCtxt,
+fn write_basic_block(tcx: TyCtxt,
block: BasicBlock,
mir: &Mir,
w: &mut Write,
writeln!(w, "{}}}", INDENT)
}
-fn comment(tcx: &TyCtxt,
- scope: ScopeId,
- span: Span)
- -> String {
+fn comment(tcx: TyCtxt, scope: ScopeId, span: Span) -> String {
format!("Scope({}) at {}", scope.index(), tcx.sess.codemap().span_to_string(span))
}
-fn write_scope_tree(tcx: &TyCtxt,
+fn write_scope_tree(tcx: TyCtxt,
mir: &Mir,
auxiliary: Option<&ScopeAuxiliaryVec>,
scope_tree: &FnvHashMap<Option<ScopeId>, Vec<ScopeId>>,
/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
/// local variables (both user-defined bindings and compiler temporaries).
-fn write_mir_intro(tcx: &TyCtxt, src: MirSource, mir: &Mir, w: &mut Write)
- -> io::Result<()> {
+fn write_mir_intro<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ src: MirSource,
+ mir: &Mir,
+ w: &mut Write)
+ -> io::Result<()> {
match src {
MirSource::Fn(_) => write!(w, "fn")?,
MirSource::Const(_) => write!(w, "const")?,
*/
impl<'tcx> MirPass<'tcx> for BreakCriticalEdges {
- fn run_pass(&mut self, _: &TyCtxt<'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>,
+ _: MirSource, mir: &mut Mir<'tcx>) {
break_critical_edges(mir);
}
}
use rustc::mir::transform::{MirPass, MirSource, Pass};
struct EraseRegionsVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> {
- pub fn new(tcx: &'a TyCtxt<'tcx>) -> Self {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
EraseRegionsVisitor {
tcx: tcx
}
}
fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>) {
- *substs = self.tcx.mk_substs(self.tcx.erase_regions(*substs));
+ *substs = self.tcx.erase_regions(&{*substs});
}
}
impl Pass for EraseRegions {}
impl<'tcx> MirPass<'tcx> for EraseRegions {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ _: MirSource, mir: &mut Mir<'tcx>) {
EraseRegionsVisitor::new(tcx).visit_mir(mir);
}
}
}
impl<'tcx> MirPass<'tcx> for NoLandingPads {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ _: MirSource, mir: &mut Mir<'tcx>) {
if tcx.sess.no_landing_pads() {
self.visit_mir(mir);
}
}
}
-pub fn promote_candidates<'tcx>(mir: &mut Mir<'tcx>,
- tcx: &TyCtxt<'tcx>,
- mut temps: Vec<TempState>,
- candidates: Vec<Candidate>) {
+pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ mut temps: Vec<TempState>,
+ candidates: Vec<Candidate>) {
// Visit candidates in reverse, in case they're nested.
for candidate in candidates.into_iter().rev() {
let (span, ty) = match candidate {
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit::FnKind;
use rustc::hir::map::blocks::FnLikeNode;
-use rustc::infer;
use rustc::traits::{self, ProjectionMode};
use rustc::ty::{self, TyCtxt, Ty};
use rustc::ty::cast::CastTy;
}
}
-impl Qualif {
+impl<'a, 'tcx> Qualif {
/// Remove flags which are impossible for the given type.
- fn restrict<'a, 'tcx>(&mut self, ty: Ty<'tcx>,
- param_env: &ty::ParameterEnvironment<'a, 'tcx>) {
- if !ty.type_contents(param_env.tcx).interior_unsafe() {
+ fn restrict(&mut self, ty: Ty<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>) {
+ if !ty.type_contents(tcx).interior_unsafe() {
*self = *self - Qualif::MUTABLE_INTERIOR;
}
- if !param_env.tcx.type_needs_drop_given_env(ty, param_env) {
+ if !tcx.type_needs_drop_given_env(ty, param_env) {
*self = *self - Qualif::NEEDS_DROP;
}
}
}
}
-fn is_const_fn(tcx: &TyCtxt, def_id: DefId) -> bool {
+fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool {
if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
let fn_like = FnLikeNode::from_node(tcx.map.get(node_id));
match fn_like.map(|f| f.kind()) {
}
}
-struct Qualifier<'a, 'tcx: 'a> {
+struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
mode: Mode,
span: Span,
def_id: DefId,
mir: &'a Mir<'tcx>,
rpo: ReversePostorder<'a, 'tcx>,
- tcx: &'a TyCtxt<'tcx>,
- param_env: ty::ParameterEnvironment<'a, 'tcx>,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ param_env: ty::ParameterEnvironment<'tcx>,
qualif_map: &'a mut DefIdMap<Qualif>,
mir_map: Option<&'a MirMap<'tcx>>,
temp_qualif: Vec<Option<Qualif>>,
promotion_candidates: Vec<Candidate>
}
-impl<'a, 'tcx> Qualifier<'a, 'tcx> {
- fn new(param_env: ty::ParameterEnvironment<'a, 'tcx>,
+impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> {
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: ty::ParameterEnvironment<'tcx>,
qualif_map: &'a mut DefIdMap<Qualif>,
mir_map: Option<&'a MirMap<'tcx>>,
def_id: DefId,
mir: &'a Mir<'tcx>,
mode: Mode)
- -> Qualifier<'a, 'tcx> {
+ -> Qualifier<'a, 'tcx, 'tcx> {
let mut rpo = traversal::reverse_postorder(mir);
let temps = promote_consts::collect_temps(mir, &mut rpo);
rpo.reset();
def_id: def_id,
mir: mir,
rpo: rpo,
- tcx: param_env.tcx,
+ tcx: tcx,
param_env: param_env,
qualif_map: qualif_map,
mir_map: mir_map,
/// Add the given type's qualification to self.qualif.
fn add_type(&mut self, ty: Ty<'tcx>) {
self.add(Qualif::MUTABLE_INTERIOR | Qualif::NEEDS_DROP);
- self.qualif.restrict(ty, &self.param_env);
+ self.qualif.restrict(ty, self.tcx, &self.param_env);
}
/// Within the provided closure, self.qualif will start
/// Accumulates an Rvalue or Call's effects in self.qualif.
/// For functions (constant or not), it also records
/// candidates for promotion in promotion_candidates.
-impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx> {
+impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) {
match *lvalue {
Lvalue::Arg(_) => {
}
let ty = this.mir.lvalue_ty(this.tcx, lvalue)
.to_ty(this.tcx);
- this.qualif.restrict(ty, &this.param_env);
+ this.qualif.restrict(ty, this.tcx, &this.param_env);
}
ProjectionElem::ConstantIndex {..} |
}
}
-fn qualify_const_item_cached<'tcx>(tcx: &TyCtxt<'tcx>,
- qualif_map: &mut DefIdMap<Qualif>,
- mir_map: Option<&MirMap<'tcx>>,
- def_id: DefId)
- -> Qualif {
+fn qualify_const_item_cached<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ qualif_map: &mut DefIdMap<Qualif>,
+ mir_map: Option<&MirMap<'tcx>>,
+ def_id: DefId)
+ -> Qualif {
match qualif_map.entry(def_id) {
Entry::Occupied(entry) => return *entry.get(),
Entry::Vacant(entry) => {
bug!("missing constant MIR for {}", tcx.item_path_str(def_id))
});
- let mut qualifier = Qualifier::new(param_env, qualif_map, mir_map,
+ let mut qualifier = Qualifier::new(tcx, param_env, qualif_map, mir_map,
def_id, mir, Mode::Const);
let qualif = qualifier.qualify_const();
qualifier.qualif_map.insert(def_id, qualif);
impl Pass for QualifyAndPromoteConstants {}
impl<'tcx> MirMapPass<'tcx> for QualifyAndPromoteConstants {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, map: &mut MirMap<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>) {
let mut qualif_map = DefIdMap();
// First, visit `const` items, potentially recursing, to get
// This is ugly because Qualifier holds onto mir,
// which can't be mutated until its scope ends.
let (temps, candidates) = {
- let mut qualifier = Qualifier::new(param_env, &mut qualif_map,
+ let mut qualifier = Qualifier::new(tcx, param_env, &mut qualif_map,
None, def_id, mir, mode);
if mode == Mode::ConstFn {
// Enforce a constant-like CFG for `const fn`.
// Do the actual promotion, now that we know what's viable.
promote_consts::promote_candidates(mir, tcx, temps, candidates);
} else {
- let mut qualifier = Qualifier::new(param_env, &mut qualif_map,
+ let mut qualifier = Qualifier::new(tcx, param_env, &mut qualif_map,
None, def_id, mir, mode);
qualifier.qualify_const();
}
// Statics must be Sync.
if mode == Mode::Static {
let ty = mir.return_ty.unwrap();
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- None,
- ProjectionMode::AnyFinal);
- let cause = traits::ObligationCause::new(mir.span, id, traits::SharedStatic);
- let mut fulfillment_cx = traits::FulfillmentContext::new();
- fulfillment_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause);
- if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) {
- traits::report_fulfillment_errors(&infcx, &err);
- }
+ tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|infcx| {
+ let cause = traits::ObligationCause::new(mir.span, id, traits::SharedStatic);
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
+ fulfillment_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause);
+ if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) {
+ infcx.report_fulfillment_errors(&err);
+ }
- if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) {
- traits::report_fulfillment_errors_as_warnings(&infcx, errors, id);
- }
+ if let Err(errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) {
+ infcx.report_fulfillment_errors_as_warnings(&errors, id);
+ }
+ });
}
}
}
pub struct RemoveDeadBlocks;
impl<'tcx> MirPass<'tcx> for RemoveDeadBlocks {
- fn run_pass(&mut self, _: &TyCtxt<'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>,
+ _: MirSource, mir: &mut Mir<'tcx>) {
let mut seen = BitVector::new(mir.basic_blocks.len());
// This block is always required.
seen.insert(START_BLOCK.index());
}
impl<'tcx> MirPass<'tcx> for SimplifyCfg {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, src: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ src: MirSource, mir: &mut Mir<'tcx>) {
let mut counter = 0;
let mut changed = true;
while changed {
/// The sanitize_XYZ methods here take an MIR object and compute its
/// type, calling `span_mirbug` and returning an error type if there
/// is a problem.
-struct TypeVerifier<'a, 'b: 'a, 'tcx: 'b> {
- cx: &'a mut TypeChecker<'b, 'tcx>,
+struct TypeVerifier<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
+ cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
last_span: Span,
errors_reported: bool
}
-impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
+impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> {
fn visit_span(&mut self, span: &Span) {
if *span != DUMMY_SP {
self.last_span = *span;
}
}
-impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
- fn new(cx: &'a mut TypeChecker<'b, 'tcx>, mir: &'a Mir<'tcx>) -> Self {
+impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
+ fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self {
TypeVerifier {
cx: cx,
mir: mir,
}
}
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.cx.infcx.tcx
}
- fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> {
+ fn infcx(&self) -> &'a InferCtxt<'a, 'gcx, 'tcx> {
self.cx.infcx
}
let fty = self.sanitize_type(lvalue, fty);
match self.field_ty(lvalue, base, field) {
Ok(ty) => {
- if let Err(terr) = self.cx.mk_eqty(span, ty, fty) {
+ if let Err(terr) = self.cx.eq_types(span, ty, fty) {
span_mirbug!(
self, lvalue, "bad field access ({:?}: {:?}): {:?}",
ty, fty, terr);
if adt_def.is_univariant() => {
(&adt_def.variants[0], substs)
}
- ty::TyTuple(ref tys) | ty::TyClosure(_, box ty::ClosureSubsts {
- upvar_tys: ref tys, ..
+ ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts {
+ upvar_tys: tys, ..
}) => {
return match tys.get(field.index()) {
Some(&ty) => Ok(ty),
}
}
-pub struct TypeChecker<'a, 'tcx: 'a> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+pub struct TypeChecker<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
fulfillment_cx: traits::FulfillmentContext<'tcx>,
last_span: Span
}
-impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
- fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
+ fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self {
TypeChecker {
infcx: infcx,
fulfillment_cx: traits::FulfillmentContext::new(),
}
}
- fn mk_subty(&self, span: Span, sup: Ty<'tcx>, sub: Ty<'tcx>)
- -> infer::UnitResult<'tcx>
+ fn sub_types(&self, span: Span, sup: Ty<'tcx>, sub: Ty<'tcx>)
+ -> infer::UnitResult<'tcx>
{
- infer::mk_subty(self.infcx, false, infer::TypeOrigin::Misc(span),
- sup, sub)
+ self.infcx.sub_types(false, infer::TypeOrigin::Misc(span), sup, sub)
// FIXME(#32730) propagate obligations
.map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
}
- fn mk_eqty(&self, span: Span, a: Ty<'tcx>, b: Ty<'tcx>)
+ fn eq_types(&self, span: Span, a: Ty<'tcx>, b: Ty<'tcx>)
-> infer::UnitResult<'tcx>
{
- infer::mk_eqty(self.infcx, false, infer::TypeOrigin::Misc(span),
- a, b)
+ self.infcx.eq_types(false, infer::TypeOrigin::Misc(span), a, b)
// FIXME(#32730) propagate obligations
.map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
}
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.infcx.tcx
}
let lv_ty = mir.lvalue_ty(tcx, lv).to_ty(tcx);
let rv_ty = mir.rvalue_ty(tcx, rv);
if let Some(rv_ty) = rv_ty {
- if let Err(terr) = self.mk_subty(self.last_span, rv_ty, lv_ty) {
+ if let Err(terr) = self.sub_types(self.last_span, rv_ty, lv_ty) {
span_mirbug!(self, stmt, "bad assignment ({:?} = {:?}): {:?}",
lv_ty, rv_ty, terr);
}
}
TerminatorKind::SwitchInt { ref discr, switch_ty, .. } => {
let discr_ty = mir.lvalue_ty(tcx, discr).to_ty(tcx);
- if let Err(terr) = self.mk_subty(self.last_span, discr_ty, switch_ty) {
+ if let Err(terr) = self.sub_types(self.last_span, discr_ty, switch_ty) {
span_mirbug!(self, term, "bad SwitchInt ({:?} on {:?}): {:?}",
switch_ty, discr_ty, terr);
}
}
(&Some((ref dest, _)), ty::FnConverging(ty)) => {
let dest_ty = mir.lvalue_ty(tcx, dest).to_ty(tcx);
- if let Err(terr) = self.mk_subty(self.last_span, ty, dest_ty) {
+ if let Err(terr) = self.sub_types(self.last_span, ty, dest_ty) {
span_mirbug!(self, term,
"call dest mismatch ({:?} <- {:?}): {:?}",
dest_ty, ty, terr);
}
for (n, (fn_arg, op_arg)) in sig.inputs.iter().zip(args).enumerate() {
let op_arg_ty = mir.operand_ty(self.tcx(), op_arg);
- if let Err(terr) = self.mk_subty(self.last_span, op_arg_ty, fn_arg) {
+ if let Err(terr) = self.sub_types(self.last_span, op_arg_ty, fn_arg) {
span_mirbug!(self, term, "bad arg #{:?} ({:?} <- {:?}): {:?}",
n, fn_arg, op_arg_ty, terr);
}
}
};
- if let Err(terr) = self.mk_subty(self.last_span, arg_ty, pointee_ty) {
+ if let Err(terr) = self.sub_types(self.last_span, arg_ty, pointee_ty) {
span_mirbug!(self, term, "bad box_free arg ({:?} <- {:?}): {:?}",
pointee_ty, arg_ty, terr);
}
}
impl<'tcx> MirPass<'tcx> for TypeckMir {
- fn run_pass(&mut self, tcx: &TyCtxt<'tcx>, src: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ src: MirSource, mir: &mut Mir<'tcx>) {
if tcx.sess.err_count() > 0 {
// compiling a broken program can obviously result in a
// broken MIR, so try not to report duplicate errors.
return;
}
let param_env = ty::ParameterEnvironment::for_item(tcx, src.item_id());
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
- let mut checker = TypeChecker::new(&infcx);
- {
- let mut verifier = TypeVerifier::new(&mut checker, mir);
- verifier.visit_mir(mir);
- if verifier.errors_reported {
- // don't do further checks to avoid ICEs
- return;
+ tcx.infer_ctxt(None, Some(param_env), ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut checker = TypeChecker::new(&infcx);
+ {
+ let mut verifier = TypeVerifier::new(&mut checker, mir);
+ verifier.visit_mir(mir);
+ if verifier.errors_reported {
+ // don't do further checks to avoid ICEs
+ return;
+ }
}
- }
- checker.typeck_mir(mir);
- checker.verify_obligations(mir);
+ checker.typeck_mir(mir);
+ checker.verify_obligations(mir);
+ });
}
}
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::middle::expr_use_visitor as euv;
-use rustc::infer;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
use rustc::ty::{self, Ty, TyCtxt};
}
struct CheckCrateVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
mode: Mode,
qualif: ConstQualif,
rvalue_borrows: NodeMap<hir::Mutability>
}
-impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
+impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> {
fn with_mode<F, R>(&mut self, mode: Mode, f: F) -> R where
- F: FnOnce(&mut CheckCrateVisitor<'a, 'tcx>) -> R,
+ F: FnOnce(&mut CheckCrateVisitor<'a, 'gcx>) -> R,
{
let (old_mode, old_qualif) = (self.mode, self.qualif);
self.mode = mode;
r
}
- fn with_euv<'b, F, R>(&'b mut self, item_id: Option<ast::NodeId>, f: F) -> R where
- F: for<'t> FnOnce(&mut euv::ExprUseVisitor<'b, 't, 'b, 'tcx>) -> R,
+ fn with_euv<F, R>(&mut self, item_id: Option<ast::NodeId>, f: F) -> R where
+ F: for<'b, 'tcx> FnOnce(&mut euv::ExprUseVisitor<'b, 'gcx, 'tcx>) -> R,
{
let param_env = match item_id {
Some(item_id) => ty::ParameterEnvironment::for_item(self.tcx, item_id),
None => self.tcx.empty_parameter_environment()
};
- let infcx = infer::new_infer_ctxt(self.tcx,
- &self.tcx.tables,
- Some(param_env),
- ProjectionMode::AnyFinal);
-
- f(&mut euv::ExprUseVisitor::new(self, &infcx))
+ self.tcx.infer_ctxt(None, Some(param_env), ProjectionMode::AnyFinal).enter(|infcx| {
+ f(&mut euv::ExprUseVisitor::new(self, &infcx))
+ })
}
fn global_expr(&mut self, mode: Mode, expr: &hir::Expr) -> ConstQualif {
fn handle_const_fn_call(&mut self,
_expr: &hir::Expr,
def_id: DefId,
- ret_ty: Ty<'tcx>)
+ ret_ty: Ty<'gcx>)
-> bool {
if let Some(fn_like) = lookup_const_fn_by_id(self.tcx, def_id) {
let qualif = self.fn_like(fn_like.kind(),
}
}
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.visit_all_items_in_krate(DepNode::CheckConst, &mut CheckCrateVisitor {
tcx: tcx,
mode: Mode::Var,
tcx.sess.abort_if_errors();
}
-impl<'a, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> {
fn consume(&mut self,
_consume_id: ast::NodeId,
_consume_span: Span,
use rustc::dep_graph::DepNode;
use rustc::middle::expr_use_visitor as euv;
-use rustc::infer;
use rustc::middle::mem_categorization as mc;
use rustc::ty::{self, TyCtxt, ParameterEnvironment};
use rustc::traits::ProjectionMode;
use syntax::ast;
use syntax::codemap::Span;
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut rvcx = RvalueContext { tcx: tcx };
tcx.visit_all_items_in_krate(DepNode::RvalueCheck, &mut rvcx);
}
struct RvalueContext<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for RvalueContext<'a, 'tcx> {
b: &'v hir::Block,
s: Span,
fn_id: ast::NodeId) {
- {
- // FIXME (@jroesch) change this to be an inference context
- let param_env = ParameterEnvironment::for_item(self.tcx, fn_id);
- let infcx = infer::new_infer_ctxt(self.tcx,
- &self.tcx.tables,
- Some(param_env.clone()),
- ProjectionMode::AnyFinal);
- let mut delegate = RvalueContextDelegate { tcx: self.tcx, param_env: &param_env };
+ // FIXME (@jroesch) change this to be an inference context
+ let param_env = ParameterEnvironment::for_item(self.tcx, fn_id);
+ self.tcx.infer_ctxt(None, Some(param_env.clone()),
+ ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut delegate = RvalueContextDelegate {
+ tcx: infcx.tcx,
+ param_env: &param_env
+ };
let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx);
euv.walk_fn(fd, b);
- }
+ });
intravisit::walk_fn(self, fk, fd, b, s)
}
}
-struct RvalueContextDelegate<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
- param_env: &'a ty::ParameterEnvironment<'a,'tcx>,
+struct RvalueContextDelegate<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ param_env: &'a ty::ParameterEnvironment<'gcx>,
}
-impl<'a, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'gcx, 'tcx> {
fn consume(&mut self,
_: ast::NodeId,
span: Span,
cmt: mc::cmt<'tcx>,
_: euv::ConsumeMode) {
debug!("consume; cmt: {:?}; type: {:?}", *cmt, cmt.ty);
- if !cmt.ty.is_sized(self.param_env, span) {
+ let ty = self.tcx.lift_to_global(&cmt.ty).unwrap();
+ if !ty.is_sized(self.tcx.global_tcx(), self.param_env, span) {
span_err!(self.tcx.sess, span, E0161,
"cannot move a value of type {0}: the size of {0} cannot be statically determined",
- cmt.ty);
+ ty);
}
}
////////////////////////////////////////////////////////////////////////////////
struct EmbargoVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
export_map: &'a def::ExportMap,
// Accessibility levels for reachable nodes
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, item: &hir::Item) {
////////////////////////////////////////////////////////////////////////////////
struct PrivacyVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
curitem: ast::NodeId,
in_foreign: bool,
}
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, item: &hir::Item) {
////////////////////////////////////////////////////////////////////////////////
struct SanePrivacyVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx, 'v> Visitor<'v> for SanePrivacyVisitor<'a, 'tcx> {
///////////////////////////////////////////////////////////////////////////////
struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
access_levels: &'a AccessLevels,
in_variant: bool,
// set of errors produced by this obsolete visitor
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.tcx.map.expect_item(item.id))
+ let tcx = self.tcx;
+ self.visit_item(tcx.map.expect_item(item.id))
}
fn visit_item(&mut self, item: &hir::Item) {
///////////////////////////////////////////////////////////////////////////////
struct SearchInterfaceForPrivateItemsVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// The visitor checks that each component type is at least this visible
required_visibility: ty::Visibility,
/// The visibility of the least visible component that has been visited
}
impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>, old_error_set: &'a NodeSet) -> Self {
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, old_error_set: &'a NodeSet) -> Self {
SearchInterfaceForPrivateItemsVisitor {
tcx: tcx,
min_visibility: ty::Visibility::Public,
let item = self.tcx.map.expect_item(node_id);
let vis = match self.substituted_alias_visibility(item, path) {
Some(vis) => vis,
- None => ty::Visibility::from_hir(&item.vis, node_id, &self.tcx),
+ None => ty::Visibility::from_hir(&item.vis, node_id, self.tcx),
};
if !vis.is_at_least(self.min_visibility, &self.tcx.map) {
let def_id = self.tcx.trait_ref_to_def_id(trait_ref);
if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) {
let item = self.tcx.map.expect_item(node_id);
- let vis = ty::Visibility::from_hir(&item.vis, node_id, &self.tcx);
+ let vis = ty::Visibility::from_hir(&item.vis, node_id, self.tcx);
if !vis.is_at_least(self.min_visibility, &self.tcx.map) {
self.min_visibility = vis;
}
struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
old_error_set: &'a NodeSet,
}
};
let mut check = SearchInterfaceForPrivateItemsVisitor::new(self.tcx, self.old_error_set);
- let item_visibility = ty::Visibility::from_hir(&item.vis, item.id, &self.tcx);
+ let item_visibility = ty::Visibility::from_hir(&item.vis, item.id, self.tcx);
match item.node {
// Crates are always public
hir::ItemForeignMod(ref foreign_mod) => {
for foreign_item in &foreign_mod.items {
check.required_visibility =
- ty::Visibility::from_hir(&foreign_item.vis, item.id, &self.tcx);
+ ty::Visibility::from_hir(&foreign_item.vis, item.id, self.tcx);
check.visit_foreign_item(foreign_item);
}
}
check.visit_generics(generics);
for field in struct_def.fields() {
- let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, &self.tcx);
+ let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, self.tcx);
check.required_visibility = min(item_visibility, field_visibility);
check.visit_struct_field(field);
}
for impl_item in impl_items {
let impl_item_vis =
- ty::Visibility::from_hir(&impl_item.vis, item.id, &self.tcx);
+ ty::Visibility::from_hir(&impl_item.vis, item.id, self.tcx);
check.required_visibility = min(impl_item_vis, ty_vis);
check.visit_impl_item(impl_item);
}
}
}
-pub fn check_crate(tcx: &TyCtxt, export_map: &def::ExportMap) -> AccessLevels {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ export_map: &def::ExportMap)
+ -> AccessLevels {
let _task = tcx.dep_graph.in_task(DepNode::Privacy);
let krate = tcx.map.krate();
use syntax::ast::{Name, NodeId};
use syntax::attr::AttrMetaMethods;
-use syntax::parse::token::keywords;
+use syntax::parse::token::{self, keywords};
use syntax::codemap::{Span, DUMMY_SP};
-use rustc::hir;
-use rustc::hir::{Block, DeclItem};
-use rustc::hir::{ForeignItem, ForeignItemFn, ForeignItemStatic};
-use rustc::hir::{Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn};
-use rustc::hir::{ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
-use rustc::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
-use rustc::hir::{PathListIdent, PathListMod, StmtDecl};
-use rustc::hir::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple};
-use rustc::hir::intravisit::{self, Visitor};
+use syntax::ast::{Block, Crate, DeclKind};
+use syntax::ast::{ForeignItem, ForeignItemKind, Item, ItemKind};
+use syntax::ast::{Mutability, PathListItemKind};
+use syntax::ast::{SelfKind, Stmt, StmtKind, TraitItemKind};
+use syntax::ast::{Variant, ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple};
+use syntax::visit::{self, Visitor};
trait ToNameBinding<'a> {
fn to_name_binding(self) -> NameBinding<'a>;
}
}
-impl<'b, 'tcx:'b> Resolver<'b, 'tcx> {
+impl<'b> Resolver<'b> {
/// Constructs the reduced graph for the entire crate.
- pub fn build_reduced_graph(&mut self, krate: &hir::Crate) {
+ pub fn build_reduced_graph(&mut self, krate: &Crate) {
let mut visitor = BuildReducedGraphVisitor {
parent: self.graph_root,
resolver: self,
};
- intravisit::walk_crate(&mut visitor, krate);
+ visit::walk_crate(&mut visitor, krate);
}
/// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined.
}
fn block_needs_anonymous_module(&mut self, block: &Block) -> bool {
- fn is_item(statement: &hir::Stmt) -> bool {
- if let StmtDecl(ref declaration, _) = statement.node {
- if let DeclItem(_) = declaration.node {
+ fn is_item(statement: &Stmt) -> bool {
+ if let StmtKind::Decl(ref declaration, _) = statement.node {
+ if let DeclKind::Item(_) = declaration.node {
return true;
}
}
block.stmts.iter().any(is_item)
}
- fn sanity_check_import(&self, view_path: &hir::ViewPath, id: NodeId) {
+ fn sanity_check_import(&self, view_path: &ViewPath, id: NodeId) {
let path = match view_path.node {
ViewPathSimple(_, ref path) |
ViewPathGlob (ref path) |
/// Constructs the reduced graph for one item.
fn build_reduced_graph_for_item(&mut self, item: &Item, parent_ref: &mut Module<'b>) {
let parent = *parent_ref;
- let name = item.name;
+ let name = item.ident.name;
let sp = item.span;
self.current_module = parent;
let vis = self.resolve_visibility(&item.vis);
match item.node {
- ItemUse(ref view_path) => {
+ ItemKind::Use(ref view_path) => {
// Extract and intern the module part of the path. For
// globs and lists, the path is found directly in the AST;
// for simple paths we have to munge the path a little.
ResolutionError::SelfImportsOnlyAllowedWithin);
}
- let subclass = ImportDirectiveSubclass::single(binding, source_name);
+ let subclass = ImportDirectiveSubclass::single(binding.name, source_name);
let span = view_path.span;
parent.add_import_directive(module_path, subclass, span, item.id, vis);
self.unresolved_imports += 1;
}
ViewPathList(_, ref source_items) => {
// Make sure there's at most one `mod` import in the list.
- let mod_spans = source_items.iter()
- .filter_map(|item| {
- match item.node {
- PathListMod { .. } => Some(item.span),
- _ => None,
- }
- })
- .collect::<Vec<Span>>();
+ let mod_spans = source_items.iter().filter_map(|item| {
+ match item.node {
+ PathListItemKind::Mod { .. } => Some(item.span),
+ _ => None,
+ }
+ }).collect::<Vec<Span>>();
+
if mod_spans.len() > 1 {
let mut e = resolve_struct_error(self,
mod_spans[0],
for source_item in source_items {
let (module_path, name, rename) = match source_item.node {
- PathListIdent { name, rename, .. } =>
- (module_path.clone(), name, rename.unwrap_or(name)),
- PathListMod { rename, .. } => {
+ PathListItemKind::Ident { name, rename, .. } =>
+ (module_path.clone(), name.name, rename.unwrap_or(name).name),
+ PathListItemKind::Mod { rename, .. } => {
let name = match module_path.last() {
Some(name) => *name,
None => {
}
};
let module_path = module_path.split_last().unwrap().1;
- let rename = rename.unwrap_or(name);
+ let rename = rename.map(|i| i.name).unwrap_or(name);
(module_path.to_vec(), name, rename)
}
};
}
}
- ItemExternCrate(_) => {
+ ItemKind::ExternCrate(_) => {
// n.b. we don't need to look at the path option here, because cstore already
// did
if let Some(crate_id) = self.session.cstore.extern_mod_stmt_cnum(item.id) {
}
}
- ItemMod(..) => {
+ ItemKind::Mod(..) => {
let parent_link = ModuleParentLink(parent, name);
- let def = Def::Mod(self.ast_map.local_def_id(item.id));
+ let def = Def::Mod(self.definitions.local_def_id(item.id));
let module = self.new_module(parent_link, Some(def), false);
self.define(parent, name, TypeNS, (module, sp, vis));
self.module_map.insert(item.id, module);
*parent_ref = module;
}
- ItemForeignMod(..) => {}
+ ItemKind::ForeignMod(..) => {}
// These items live in the value namespace.
- ItemStatic(_, m, _) => {
- let mutbl = m == hir::MutMutable;
- let def = Def::Static(self.ast_map.local_def_id(item.id), mutbl);
+ ItemKind::Static(_, m, _) => {
+ let mutbl = m == Mutability::Mutable;
+ let def = Def::Static(self.definitions.local_def_id(item.id), mutbl);
self.define(parent, name, ValueNS, (def, sp, vis));
}
- ItemConst(_, _) => {
- let def = Def::Const(self.ast_map.local_def_id(item.id));
+ ItemKind::Const(_, _) => {
+ let def = Def::Const(self.definitions.local_def_id(item.id));
self.define(parent, name, ValueNS, (def, sp, vis));
}
- ItemFn(_, _, _, _, _, _) => {
- let def = Def::Fn(self.ast_map.local_def_id(item.id));
+ ItemKind::Fn(_, _, _, _, _, _) => {
+ let def = Def::Fn(self.definitions.local_def_id(item.id));
self.define(parent, name, ValueNS, (def, sp, vis));
}
// These items live in the type namespace.
- ItemTy(..) => {
- let def = Def::TyAlias(self.ast_map.local_def_id(item.id));
+ ItemKind::Ty(..) => {
+ let def = Def::TyAlias(self.definitions.local_def_id(item.id));
self.define(parent, name, TypeNS, (def, sp, vis));
}
- ItemEnum(ref enum_definition, _) => {
+ ItemKind::Enum(ref enum_definition, _) => {
let parent_link = ModuleParentLink(parent, name);
- let def = Def::Enum(self.ast_map.local_def_id(item.id));
+ let def = Def::Enum(self.definitions.local_def_id(item.id));
let module = self.new_module(parent_link, Some(def), false);
self.define(parent, name, TypeNS, (module, sp, vis));
for variant in &(*enum_definition).variants {
- let item_def_id = self.ast_map.local_def_id(item.id);
+ let item_def_id = self.definitions.local_def_id(item.id);
self.build_reduced_graph_for_variant(variant, item_def_id, module, vis);
}
}
// These items live in both the type and value namespaces.
- ItemStruct(ref struct_def, _) => {
+ ItemKind::Struct(ref struct_def, _) => {
// Define a name in the type namespace.
- let def = Def::Struct(self.ast_map.local_def_id(item.id));
+ let def = Def::Struct(self.definitions.local_def_id(item.id));
self.define(parent, name, TypeNS, (def, sp, vis));
// If this is a newtype or unit-like struct, define a name
// in the value namespace as well
if !struct_def.is_struct() {
- let def = Def::Struct(self.ast_map.local_def_id(struct_def.id()));
+ let def = Def::Struct(self.definitions.local_def_id(struct_def.id()));
self.define(parent, name, ValueNS, (def, sp, vis));
}
// Record the def ID and fields of this struct.
- let field_names = struct_def.fields().iter().map(|field| {
+ let field_names = struct_def.fields().iter().enumerate().map(|(index, field)| {
self.resolve_visibility(&field.vis);
- field.name
+ field.ident.map(|ident| ident.name)
+ .unwrap_or_else(|| token::intern(&index.to_string()))
}).collect();
- let item_def_id = self.ast_map.local_def_id(item.id);
+ let item_def_id = self.definitions.local_def_id(item.id);
self.structs.insert(item_def_id, field_names);
}
- ItemDefaultImpl(_, _) | ItemImpl(..) => {}
+ ItemKind::DefaultImpl(_, _) | ItemKind::Impl(..) => {}
- ItemTrait(_, _, _, ref items) => {
- let def_id = self.ast_map.local_def_id(item.id);
+ ItemKind::Trait(_, _, _, ref items) => {
+ let def_id = self.definitions.local_def_id(item.id);
// Add all the items within to a new module.
let parent_link = ModuleParentLink(parent, name);
// Add the names of all the items to the trait info.
for item in items {
- let item_def_id = self.ast_map.local_def_id(item.id);
+ let item_def_id = self.definitions.local_def_id(item.id);
let mut is_static_method = false;
let (def, ns) = match item.node {
- hir::ConstTraitItem(..) => (Def::AssociatedConst(item_def_id), ValueNS),
- hir::MethodTraitItem(ref sig, _) => {
- is_static_method = sig.explicit_self.node == hir::SelfStatic;
+ TraitItemKind::Const(..) => (Def::AssociatedConst(item_def_id), ValueNS),
+ TraitItemKind::Method(ref sig, _) => {
+ is_static_method = sig.explicit_self.node == SelfKind::Static;
(Def::Method(item_def_id), ValueNS)
}
- hir::TypeTraitItem(..) => (Def::AssociatedTy(def_id, item_def_id), TypeNS),
+ TraitItemKind::Type(..) => (Def::AssociatedTy(def_id, item_def_id), TypeNS),
};
- self.define(module_parent, item.name, ns, (def, item.span, vis));
+ self.define(module_parent, item.ident.name, ns, (def, item.span, vis));
- self.trait_item_map.insert((item.name, def_id), is_static_method);
+ self.trait_item_map.insert((item.ident.name, def_id), is_static_method);
}
}
+ ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"),
}
}
item_id: DefId,
parent: Module<'b>,
vis: ty::Visibility) {
- let name = variant.node.name;
+ let name = variant.node.name.name;
if variant.node.data.is_struct() {
// Not adding fields for variants as they are not accessed with a self receiver
- let variant_def_id = self.ast_map.local_def_id(variant.node.data.id());
+ let variant_def_id = self.definitions.local_def_id(variant.node.data.id());
self.structs.insert(variant_def_id, Vec::new());
}
// Variants are always treated as importable to allow them to be glob used.
// All variants are defined in both type and value namespaces as future-proofing.
- let def = Def::Variant(item_id, self.ast_map.local_def_id(variant.node.data.id()));
+ let def = Def::Variant(item_id, self.definitions.local_def_id(variant.node.data.id()));
self.define(parent, name, ValueNS, (def, variant.span, vis));
self.define(parent, name, TypeNS, (def, variant.span, vis));
}
fn build_reduced_graph_for_foreign_item(&mut self,
foreign_item: &ForeignItem,
parent: Module<'b>) {
- let name = foreign_item.name;
+ let name = foreign_item.ident.name;
let def = match foreign_item.node {
- ForeignItemFn(..) => {
- Def::Fn(self.ast_map.local_def_id(foreign_item.id))
+ ForeignItemKind::Fn(..) => {
+ Def::Fn(self.definitions.local_def_id(foreign_item.id))
}
- ForeignItemStatic(_, m) => {
- Def::Static(self.ast_map.local_def_id(foreign_item.id), m)
+ ForeignItemKind::Static(_, m) => {
+ Def::Static(self.definitions.local_def_id(foreign_item.id), m)
}
};
self.current_module = parent;
}
}
-struct BuildReducedGraphVisitor<'a, 'b: 'a, 'tcx: 'b> {
- resolver: &'a mut Resolver<'b, 'tcx>,
+struct BuildReducedGraphVisitor<'a, 'b: 'a> {
+ resolver: &'a mut Resolver<'b>,
parent: Module<'b>,
}
-impl<'a, 'b, 'v, 'tcx> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b, 'tcx> {
- fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.resolver.ast_map.expect_item(item.id))
- }
-
+impl<'a, 'b, 'v> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b> {
fn visit_item(&mut self, item: &Item) {
let old_parent = self.parent;
self.resolver.build_reduced_graph_for_item(item, &mut self.parent);
- intravisit::walk_item(self, item);
+ visit::walk_item(self, item);
self.parent = old_parent;
}
fn visit_block(&mut self, block: &Block) {
let old_parent = self.parent;
self.resolver.build_reduced_graph_for_block(block, &mut self.parent);
- intravisit::walk_block(self, block);
+ visit::walk_block(self, block);
self.parent = old_parent;
}
}
use Namespace::{TypeNS, ValueNS};
use rustc::lint;
-use syntax::ast;
+use syntax::ast::{self, ViewPathGlob, ViewPathList, ViewPathSimple};
+use syntax::visit::{self, Visitor};
use syntax::codemap::{Span, DUMMY_SP};
-use rustc::hir;
-use rustc::hir::{ViewPathGlob, ViewPathList, ViewPathSimple};
-use rustc::hir::intravisit::Visitor;
-struct UnusedImportCheckVisitor<'a, 'b: 'a, 'tcx: 'b> {
- resolver: &'a mut Resolver<'b, 'tcx>,
+struct UnusedImportCheckVisitor<'a, 'b: 'a> {
+ resolver: &'a mut Resolver<'b>,
}
// Deref and DerefMut impls allow treating UnusedImportCheckVisitor as Resolver.
-impl<'a, 'b, 'tcx:'b> Deref for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
- type Target = Resolver<'b, 'tcx>;
+impl<'a, 'b> Deref for UnusedImportCheckVisitor<'a, 'b> {
+ type Target = Resolver<'b>;
- fn deref<'c>(&'c self) -> &'c Resolver<'b, 'tcx> {
+ fn deref<'c>(&'c self) -> &'c Resolver<'b> {
&*self.resolver
}
}
-impl<'a, 'b, 'tcx:'b> DerefMut for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
- fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b, 'tcx> {
+impl<'a, 'b> DerefMut for UnusedImportCheckVisitor<'a, 'b> {
+ fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b> {
&mut *self.resolver
}
}
-impl<'a, 'b, 'tcx> UnusedImportCheckVisitor<'a, 'b, 'tcx> {
+impl<'a, 'b> UnusedImportCheckVisitor<'a, 'b> {
// We have information about whether `use` (import) directives are actually
// used now. If an import is not used at all, we signal a lint error.
fn check_import(&mut self, id: ast::NodeId, span: Span) {
}
}
-impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
- fn visit_item(&mut self, item: &hir::Item) {
+impl<'a, 'b, 'v> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b> {
+ fn visit_item(&mut self, item: &ast::Item) {
+ visit::walk_item(self, item);
// Ignore is_public import statements because there's no way to be sure
// whether they're used or not. Also ignore imports with a dummy span
// because this means that they were generated in some fashion by the
// compiler and we don't need to consider them.
- if item.vis == hir::Public || item.span.source_equal(&DUMMY_SP) {
+ if item.vis == ast::Visibility::Public || item.span.source_equal(&DUMMY_SP) {
return;
}
match item.node {
- hir::ItemExternCrate(_) => {
+ ast::ItemKind::ExternCrate(_) => {
if let Some(crate_num) = self.session.cstore.extern_mod_stmt_cnum(item.id) {
if !self.used_crates.contains(&crate_num) {
self.session.add_lint(lint::builtin::UNUSED_EXTERN_CRATES,
}
}
}
- hir::ItemUse(ref p) => {
+ ast::ItemKind::Use(ref p) => {
match p.node {
ViewPathSimple(_, _) => {
self.check_import(item.id, p.span)
}
}
-pub fn check_crate(resolver: &mut Resolver, krate: &hir::Crate) {
+pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) {
let mut visitor = UnusedImportCheckVisitor { resolver: resolver };
- krate.visit_all_items(&mut visitor);
+ visit::walk_crate(&mut visitor, krate);
}
```
"##,
+E0408: r##"
+An "or" pattern was used where the variable bindings are not consistently bound
+across patterns.
+
+Example of erroneous code:
+
+```compile_fail
+match x {
+ Some(y) | None => { /* use y */ } // error: variable `y` from pattern #1 is
+ // not bound in pattern #2
+ _ => ()
+}
+```
+
+Here, `y` is bound to the contents of the `Some` and can be used within the
+block corresponding to the match arm. However, in case `x` is `None`, we have
+not specified what `y` is, and the block will use a nonexistent variable.
+
+To fix this error, either split into multiple match arms:
+
+```
+let x = Some(1);
+match x {
+ Some(y) => { /* use y */ }
+ None => { /* ... */ }
+}
+```
+
+or, bind the variable to a field of the same type in all sub-patterns of the
+or pattern:
+
+```
+let x = (0, 2);
+match x {
+ (0, y) | (y, 0) => { /* use y */}
+ _ => {}
+}
+```
+
+In this example, if `x` matches the pattern `(0, _)`, the second field is set
+to `y`. If it matches `(_, 0)`, the first field is set to `y`; so in all
+cases `y` is set to some value.
+"##,
+
+E0409: r##"
+An "or" pattern was used where the variable bindings are not consistently bound
+across patterns.
+
+Example of erroneous code:
+
+```compile_fail
+let x = (0, 2);
+match x {
+ (0, ref y) | (y, 0) => { /* use y */} // error: variable `y` is bound with
+ // different mode in pattern #2
+ // than in pattern #1
+ _ => ()
+}
+```
+
+Here, `y` is bound by-value in one case and by-reference in the other.
+
+To fix this error, just use the same mode in both cases.
+Generally using `ref` or `ref mut` where not already used will fix this:
+
+```ignore
+let x = (0, 2);
+match x {
+ (0, ref y) | (ref y, 0) => { /* use y */}
+ _ => ()
+}
+```
+
+Alternatively, split the pattern:
+
+```
+let x = (0, 2);
+match x {
+ (y, 0) => { /* use y */ }
+ (0, ref y) => { /* use y */}
+ _ => ()
+}
+```
+"##,
+
E0411: r##"
The `Self` keyword was used outside an impl or a trait. Erroneous code example:
// E0258,
E0402, // cannot use an outer type parameter in this context
E0406, // undeclared associated type
- E0408, // variable from pattern #1 is not bound in pattern #
- E0409, // variable is bound with different mode in pattern # than in
- // pattern #1
- E0410, // variable from pattern is not bound in pattern 1
+// E0410, merged into E0408
E0418, // is not an enum variant, struct or const
E0420, // is not an associated const
E0421, // unresolved associated const
use self::BareIdentifierPatternResolution::*;
use self::ParentLink::*;
-use rustc::dep_graph::DepNode;
-use rustc::hir::map as hir_map;
+use rustc::hir::map::Definitions;
+use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr};
use rustc::session::Session;
use rustc::lint;
use rustc::hir::def::*;
use rustc::hir::def_id::DefId;
-use rustc::hir::pat_util::pat_bindings;
use rustc::ty;
use rustc::ty::subst::{ParamSpace, FnSpace, TypeSpace};
use rustc::hir::{Freevar, FreevarMap, TraitCandidate, TraitMap, GlobMap};
use rustc::util::nodemap::{NodeMap, NodeSet, FnvHashMap, FnvHashSet};
+use syntax::ext::mtwt;
use syntax::ast::{self, FloatTy};
use syntax::ast::{CRATE_NODE_ID, Name, NodeId, CrateNum, IntTy, UintTy};
-use syntax::codemap::{self, Span, Pos};
+use syntax::codemap::{self, Span};
use syntax::errors::DiagnosticBuilder;
use syntax::parse::token::{self, keywords};
use syntax::util::lev_distance::find_best_match_for_name;
-use rustc::hir::intravisit::{self, FnKind, Visitor};
-use rustc::hir;
-use rustc::hir::{Arm, BindByRef, BindByValue, BindingMode, Block};
-use rustc::hir::Crate;
-use rustc::hir::{Expr, ExprAgain, ExprBreak, ExprField};
-use rustc::hir::{ExprLoop, ExprWhile, ExprMethodCall};
-use rustc::hir::{ExprPath, ExprStruct, FnDecl};
-use rustc::hir::{ForeignItemFn, ForeignItemStatic, Generics};
-use rustc::hir::{ImplItem, Item, ItemConst, ItemEnum, ItemExternCrate};
-use rustc::hir::{ItemFn, ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
-use rustc::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
-use rustc::hir::Local;
-use rustc::hir::{Pat, PatKind, Path, PrimTy};
-use rustc::hir::{PathSegment, PathParameters};
-use rustc::hir::HirVec;
-use rustc::hir::{TraitRef, Ty, TyBool, TyChar, TyFloat, TyInt};
-use rustc::hir::{TyRptr, TyStr, TyUint, TyPath};
+use syntax::visit::{self, FnKind, Visitor};
+use syntax::ast::{Arm, BindingMode, Block, Crate, Expr, ExprKind};
+use syntax::ast::{FnDecl, ForeignItem, ForeignItemKind, Generics};
+use syntax::ast::{Item, ItemKind, ImplItem, ImplItemKind};
+use syntax::ast::{Local, Pat, PatKind, Path};
+use syntax::ast::{PathSegment, PathParameters, TraitItemKind, TraitRef, Ty, TyKind};
use std::collections::{HashMap, HashSet};
use std::cell::{Cell, RefCell};
TypeNotMemberOfTrait(Name, &'a str),
/// error E0438: const is not a member of trait
ConstNotMemberOfTrait(Name, &'a str),
- /// error E0408: variable `{}` from pattern #1 is not bound in pattern
- VariableNotBoundInPattern(Name, usize),
+ /// error E0408: variable `{}` from pattern #{} is not bound in pattern #{}
+ VariableNotBoundInPattern(Name, usize, usize),
/// error E0409: variable is bound with different mode in pattern #{} than in pattern #1
VariableBoundWithDifferentMode(Name, usize),
- /// error E0410: variable from pattern is not bound in pattern #1
- VariableNotBoundInParentPattern(Name, usize),
/// error E0411: use of `Self` outside of an impl or trait
SelfUsedOutsideImplOrTrait,
/// error E0412: use of undeclared
/// error E0416: identifier is bound more than once in the same pattern
IdentifierBoundMoreThanOnceInSamePattern(&'a str),
/// error E0417: static variables cannot be referenced in a pattern
- StaticVariableReference(DefId, Option<Name>),
+ StaticVariableReference(&'a NameBinding<'a>),
/// error E0418: is not an enum variant, struct or const
NotAnEnumVariantStructOrConst(&'a str),
/// error E0419: unresolved enum variant, struct or const
Other,
}
-fn resolve_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>,
- span: syntax::codemap::Span,
- resolution_error: ResolutionError<'b>) {
+fn resolve_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>,
+ span: syntax::codemap::Span,
+ resolution_error: ResolutionError<'c>) {
resolve_struct_error(resolver, span, resolution_error).emit();
}
-fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>,
- span: syntax::codemap::Span,
- resolution_error: ResolutionError<'b>)
- -> DiagnosticBuilder<'a> {
+fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>,
+ span: syntax::codemap::Span,
+ resolution_error: ResolutionError<'c>)
+ -> DiagnosticBuilder<'a> {
if !resolver.emit_errors {
return resolver.session.diagnostic().struct_dummy();
}
const_,
trait_)
}
- ResolutionError::VariableNotBoundInPattern(variable_name, pattern_number) => {
+ ResolutionError::VariableNotBoundInPattern(variable_name, from, to) => {
struct_span_err!(resolver.session,
span,
E0408,
- "variable `{}` from pattern #1 is not bound in pattern #{}",
+ "variable `{}` from pattern #{} is not bound in pattern #{}",
variable_name,
- pattern_number)
+ from,
+ to)
}
ResolutionError::VariableBoundWithDifferentMode(variable_name, pattern_number) => {
struct_span_err!(resolver.session,
variable_name,
pattern_number)
}
- ResolutionError::VariableNotBoundInParentPattern(variable_name, pattern_number) => {
- struct_span_err!(resolver.session,
- span,
- E0410,
- "variable `{}` from pattern #{} is not bound in pattern #1",
- variable_name,
- pattern_number)
- }
ResolutionError::SelfUsedOutsideImplOrTrait => {
struct_span_err!(resolver.session,
span,
"identifier `{}` is bound more than once in the same pattern",
identifier)
}
- ResolutionError::StaticVariableReference(did, name) => {
+ ResolutionError::StaticVariableReference(binding) => {
let mut err = struct_span_err!(resolver.session,
span,
E0417,
"static variables cannot be referenced in a \
pattern, use a `const` instead");
- if let Some(sp) = resolver.ast_map.span_if_local(did) {
- err.span_note(sp, "static variable defined here");
- }
- if let Some(name) = name {
- if let Some(binding) = resolver.current_module
- .resolve_name_in_lexical_scope(name, ValueNS) {
- if binding.is_import() {
- err.span_note(binding.span, "static variable imported here");
- }
- }
+ if binding.span != codemap::DUMMY_SP {
+ let participle = if binding.is_import() { "imported" } else { "defined" };
+ err.span_note(binding.span, &format!("static variable {} here", participle));
}
err
}
UnresolvedNameContext::Other => { } // no help available
UnresolvedNameContext::PathIsMod(parent) => {
err.help(&match parent.map(|parent| &parent.node) {
- Some(&ExprField(_, ident)) => {
+ Some(&ExprKind::Field(_, ident)) => {
format!("To reference an item from the `{module}` module, \
use `{module}::{ident}`",
module = path,
ident = ident.node)
}
- Some(&ExprMethodCall(ident, _, _)) => {
+ Some(&ExprKind::MethodCall(ident, _, _)) => {
format!("To call a function from the `{module}` module, \
use `{module}::{ident}(..)`",
module = path,
ValueNS,
}
-impl<'a, 'v, 'tcx> Visitor<'v> for Resolver<'a, 'tcx> {
- fn visit_nested_item(&mut self, item: hir::ItemId) {
- self.visit_item(self.ast_map.expect_item(item.id))
- }
+impl<'a, 'v> Visitor<'v> for Resolver<'a> {
fn visit_item(&mut self, item: &Item) {
self.resolve_item(item);
}
fn visit_generics(&mut self, generics: &Generics) {
self.resolve_generics(generics);
}
- fn visit_poly_trait_ref(&mut self, tref: &hir::PolyTraitRef, m: &hir::TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, tref: &ast::PolyTraitRef, m: &ast::TraitBoundModifier) {
match self.resolve_trait_reference(tref.trait_ref.ref_id, &tref.trait_ref.path, 0) {
Ok(def) => self.record_def(tref.trait_ref.ref_id, def),
Err(_) => {
self.record_def(tref.trait_ref.ref_id, err_path_resolution())
}
}
- intravisit::walk_poly_trait_ref(self, tref, m);
+ visit::walk_poly_trait_ref(self, tref, m);
}
fn visit_variant(&mut self,
- variant: &hir::Variant,
+ variant: &ast::Variant,
generics: &Generics,
item_id: ast::NodeId) {
if let Some(ref dis_expr) = variant.node.disr_expr {
});
}
- // `intravisit::walk_variant` without the discriminant expression.
+ // `visit::walk_variant` without the discriminant expression.
self.visit_variant_data(&variant.node.data,
variant.node.name,
generics,
item_id,
variant.span);
}
- fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem) {
+ fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
let type_parameters = match foreign_item.node {
- ForeignItemFn(_, ref generics) => {
+ ForeignItemKind::Fn(_, ref generics) => {
HasTypeParameters(generics, FnSpace, ItemRibKind)
}
- ForeignItemStatic(..) => NoTypeParameters,
+ ForeignItemKind::Static(..) => NoTypeParameters,
};
self.with_type_parameter_rib(type_parameters, |this| {
- intravisit::walk_foreign_item(this, foreign_item);
+ visit::walk_foreign_item(this, foreign_item);
});
}
fn visit_fn(&mut self,
_: Span,
node_id: NodeId) {
let rib_kind = match function_kind {
- FnKind::ItemFn(_, generics, _, _, _, _, _) => {
+ FnKind::ItemFn(_, generics, _, _, _, _) => {
self.visit_generics(generics);
ItemRibKind
}
- FnKind::Method(_, sig, _, _) => {
+ FnKind::Method(_, sig, _) => {
self.visit_generics(&sig.generics);
self.visit_explicit_self(&sig.explicit_self);
MethodRibKind
}
- FnKind::Closure(_) => ClosureRibKind(node_id),
+ FnKind::Closure => ClosureRibKind(node_id),
};
self.resolve_function(rib_kind, declaration, block);
}
}
#[derive(Copy, Clone)]
-enum TypeParameters<'tcx, 'a> {
+enum TypeParameters<'a, 'b> {
NoTypeParameters,
HasTypeParameters(// Type parameters.
- &'a Generics,
+ &'b Generics,
// Identifies the things that these parameters
// were declared on (type, fn, etc)
ParamSpace,
// The kind of the rib used for type parameters.
- RibKind<'tcx>),
+ RibKind<'a>),
}
// The rib kind controls the translation of local
}
}
- fn def(self) -> Def {
- self.local_def().def
- }
-
fn module(self) -> Option<Module<'a>> {
match self {
LexicalScopeBinding::Item(binding) => binding.module(),
}
/// The main resolver class.
-pub struct Resolver<'a, 'tcx: 'a> {
+pub struct Resolver<'a> {
session: &'a Session,
- ast_map: &'a hir_map::Map<'tcx>,
+ definitions: &'a mut Definitions,
graph_root: Module<'a>,
// The idents for the primitive types.
primitive_type_table: PrimitiveTypeTable,
- def_map: RefCell<DefMap>,
- freevars: FreevarMap,
+ pub def_map: DefMap,
+ pub freevars: FreevarMap,
freevars_seen: NodeMap<NodeMap<usize>>,
- export_map: ExportMap,
- trait_map: TraitMap,
+ pub export_map: ExportMap,
+ pub trait_map: TraitMap,
// A map from nodes to modules, both normal (`mod`) modules and anonymous modules.
// Anonymous modules are pseudo-modules that are implicitly created around items
// so as to avoid printing duplicate errors
emit_errors: bool,
- make_glob_map: bool,
+ pub make_glob_map: bool,
// Maps imports to the names of items actually imported (this actually maps
// all imports, but only glob imports are actually interesting).
- glob_map: GlobMap,
+ pub glob_map: GlobMap,
used_imports: HashSet<(NodeId, Namespace)>,
used_crates: HashSet<CrateNum>,
- maybe_unused_trait_imports: NodeSet,
+ pub maybe_unused_trait_imports: NodeSet,
privacy_errors: Vec<PrivacyError<'a>>,
}
}
-impl<'a, 'tcx> ty::NodeIdTree for Resolver<'a, 'tcx> {
+impl<'a> ty::NodeIdTree for Resolver<'a> {
fn is_descendant_of(&self, node: NodeId, ancestor: NodeId) -> bool {
- let ancestor = self.ast_map.local_def_id(ancestor);
+ let ancestor = self.definitions.local_def_id(ancestor);
let mut module = *self.module_map.get(&node).unwrap();
while module.def_id() != Some(ancestor) {
let module_parent = match self.get_nearest_normal_module_parent(module) {
}
}
-impl<'a, 'tcx> Resolver<'a, 'tcx> {
+impl<'a> hir::lowering::Resolver for Resolver<'a> {
+ fn resolve_generated_global_path(&mut self, path: &hir::Path, is_value: bool) -> Def {
+ let namespace = if is_value { ValueNS } else { TypeNS };
+ match self.resolve_crate_relative_path(path.span, &path.segments, namespace) {
+ Ok(binding) => binding.def().unwrap(),
+ Err(true) => Def::Err,
+ Err(false) => {
+ let path_name = &format!("{}", path);
+ let error =
+ ResolutionError::UnresolvedName(path_name, "", UnresolvedNameContext::Other);
+ resolve_error(self, path.span, error);
+ Def::Err
+ }
+ }
+ }
+
+ fn record_resolution(&mut self, id: NodeId, def: Def) {
+ self.def_map.insert(id, PathResolution { base_def: def, depth: 0 });
+ }
+
+ fn definitions(&mut self) -> Option<&mut Definitions> {
+ Some(self.definitions)
+ }
+}
+
+trait Named {
+ fn name(&self) -> Name;
+}
+
+impl Named for ast::PathSegment {
+ fn name(&self) -> Name {
+ self.identifier.name
+ }
+}
+
+impl Named for hir::PathSegment {
+ fn name(&self) -> Name {
+ self.identifier.name
+ }
+}
+
+impl<'a> Resolver<'a> {
fn new(session: &'a Session,
- ast_map: &'a hir_map::Map<'tcx>,
+ definitions: &'a mut Definitions,
make_glob_map: MakeGlobMap,
arenas: &'a ResolverArenas<'a>)
- -> Resolver<'a, 'tcx> {
- let root_def_id = ast_map.local_def_id(CRATE_NODE_ID);
+ -> Resolver<'a> {
+ let root_def_id = definitions.local_def_id(CRATE_NODE_ID);
let graph_root =
ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false, arenas);
let graph_root = arenas.alloc_module(graph_root);
Resolver {
session: session,
- ast_map: ast_map,
+ definitions: definitions,
// The outermost module has def ID 0; this is not reflected in the
// AST.
primitive_type_table: PrimitiveTypeTable::new(),
- def_map: RefCell::new(NodeMap()),
+ def_map: NodeMap(),
freevars: NodeMap(),
freevars_seen: NodeMap(),
export_map: NodeMap(),
Failed(None) => {
let segment_name = name.as_str();
let module_name = module_to_string(search_module);
- let mut span = span;
let msg = if "???" == &module_name {
- span.hi = span.lo + Pos::from_usize(segment_name.len());
-
match search_parent_externals(name, &self.current_module) {
Some(module) => {
let path_str = names_to_string(module_path);
// This is not a crate-relative path. We resolve the
// first component of the path in the current lexical
// scope and then proceed to resolve below that.
- let ident = hir::Ident::from_name(module_path[0]);
+ let ident = ast::Ident::with_empty_ctxt(module_path[0]);
match self.resolve_ident_in_lexical_scope(ident, TypeNS, true)
.and_then(LexicalScopeBinding::module) {
None => return Failed(None),
/// Invariant: This must only be called during main resolution, not during
/// import resolution.
fn resolve_ident_in_lexical_scope(&mut self,
- ident: hir::Ident,
+ ident: ast::Ident,
ns: Namespace,
record_used: bool)
-> Option<LexicalScopeBinding<'a>> {
- let name = match ns { ValueNS => ident.name, TypeNS => ident.unhygienic_name };
+ let name = match ns { ValueNS => mtwt::resolve(ident), TypeNS => ident.name };
// Walk backwards up the ribs in scope.
for i in (0 .. self.get_ribs(ns).len()).rev() {
}
if let ModuleRibKind(module) = self.get_ribs(ns)[i].kind {
- let name = ident.unhygienic_name;
+ let name = ident.name;
let item = self.resolve_name_in_module(module, name, ns, true, record_used);
if let Success(binding) = item {
// The ident resolves to an item.
None
}
- fn resolve_crate(&mut self, krate: &hir::Crate) {
+ fn resolve_crate(&mut self, krate: &Crate) {
debug!("(resolving crate) starting");
self.current_module = self.graph_root;
- intravisit::walk_crate(self, krate);
+ visit::walk_crate(self, krate);
}
fn resolve_item(&mut self, item: &Item) {
- let name = item.name;
+ let name = item.ident.name;
debug!("(resolving item) resolving {}", name);
match item.node {
- ItemEnum(_, ref generics) |
- ItemTy(_, ref generics) |
- ItemStruct(_, ref generics) => {
+ ItemKind::Enum(_, ref generics) |
+ ItemKind::Ty(_, ref generics) |
+ ItemKind::Struct(_, ref generics) => {
self.with_type_parameter_rib(HasTypeParameters(generics, TypeSpace, ItemRibKind),
- |this| intravisit::walk_item(this, item));
+ |this| visit::walk_item(this, item));
}
- ItemFn(_, _, _, _, ref generics, _) => {
+ ItemKind::Fn(_, _, _, _, ref generics, _) => {
self.with_type_parameter_rib(HasTypeParameters(generics, FnSpace, ItemRibKind),
- |this| intravisit::walk_item(this, item));
+ |this| visit::walk_item(this, item));
}
- ItemDefaultImpl(_, ref trait_ref) => {
+ ItemKind::DefaultImpl(_, ref trait_ref) => {
self.with_optional_trait_ref(Some(trait_ref), |_, _| {});
}
- ItemImpl(_, _, ref generics, ref opt_trait_ref, ref self_type, ref impl_items) => {
+ ItemKind::Impl(_, _, ref generics, ref opt_trait_ref, ref self_type, ref impl_items) =>
self.resolve_implementation(generics,
opt_trait_ref,
&self_type,
item.id,
- impl_items);
- }
+ impl_items),
- ItemTrait(_, ref generics, ref bounds, ref trait_items) => {
+ ItemKind::Trait(_, ref generics, ref bounds, ref trait_items) => {
// Create a new rib for the trait-wide type parameters.
self.with_type_parameter_rib(HasTypeParameters(generics,
TypeSpace,
ItemRibKind),
|this| {
- let local_def_id = this.ast_map.local_def_id(item.id);
+ let local_def_id = this.definitions.local_def_id(item.id);
this.with_self_rib(Def::SelfTy(Some(local_def_id), None), |this| {
this.visit_generics(generics);
walk_list!(this, visit_ty_param_bound, bounds);
for trait_item in trait_items {
match trait_item.node {
- hir::ConstTraitItem(_, ref default) => {
+ TraitItemKind::Const(_, ref default) => {
// Only impose the restrictions of
// ConstRibKind if there's an actual constant
// expression in a provided default.
if default.is_some() {
this.with_constant_rib(|this| {
- intravisit::walk_trait_item(this, trait_item)
+ visit::walk_trait_item(this, trait_item)
});
} else {
- intravisit::walk_trait_item(this, trait_item)
+ visit::walk_trait_item(this, trait_item)
}
}
- hir::MethodTraitItem(ref sig, _) => {
+ TraitItemKind::Method(ref sig, _) => {
let type_parameters =
HasTypeParameters(&sig.generics,
FnSpace,
MethodRibKind);
this.with_type_parameter_rib(type_parameters, |this| {
- intravisit::walk_trait_item(this, trait_item)
+ visit::walk_trait_item(this, trait_item)
});
}
- hir::TypeTraitItem(..) => {
+ TraitItemKind::Type(..) => {
this.with_type_parameter_rib(NoTypeParameters, |this| {
- intravisit::walk_trait_item(this, trait_item)
+ visit::walk_trait_item(this, trait_item)
});
}
};
});
}
- ItemMod(_) | ItemForeignMod(_) => {
+ ItemKind::Mod(_) | ItemKind::ForeignMod(_) => {
self.with_scope(item.id, |this| {
- intravisit::walk_item(this, item);
+ visit::walk_item(this, item);
});
}
- ItemConst(..) | ItemStatic(..) => {
+ ItemKind::Const(..) | ItemKind::Static(..) => {
self.with_constant_rib(|this| {
- intravisit::walk_item(this, item);
+ visit::walk_item(this, item);
});
}
- ItemUse(ref view_path) => {
+ ItemKind::Use(ref view_path) => {
match view_path.node {
- hir::ViewPathList(ref prefix, ref items) => {
+ ast::ViewPathList(ref prefix, ref items) => {
// Resolve prefix of an import with empty braces (issue #28388)
if items.is_empty() && !prefix.segments.is_empty() {
match self.resolve_crate_relative_path(prefix.span,
}
}
- ItemExternCrate(_) => {
+ ItemKind::ExternCrate(_) => {
// do nothing, these are just around to be encoded
}
+
+ ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"),
}
}
let mut function_type_rib = Rib::new(rib_kind);
let mut seen_bindings = HashSet::new();
for (index, type_parameter) in generics.ty_params.iter().enumerate() {
- let name = type_parameter.name;
+ let name = type_parameter.ident.name;
debug!("with_type_parameter_rib: {}", type_parameter.id);
if seen_bindings.contains(&name) {
seen_bindings.insert(name);
// plain insert (no renaming)
- let def_id = self.ast_map.local_def_id(type_parameter.id);
+ let def_id = self.definitions.local_def_id(type_parameter.id);
let def = Def::TyParam(space, index as u32, def_id, name);
function_type_rib.bindings.insert(name, def);
}
debug!("(resolving function) recorded argument");
}
- intravisit::walk_fn_ret_ty(self, &declaration.output);
+ visit::walk_fn_ret_ty(self, &declaration.output);
// Resolve the function body.
self.visit_block(block);
fn resolve_generics(&mut self, generics: &Generics) {
for predicate in &generics.where_clause.predicates {
match predicate {
- &hir::WherePredicate::BoundPredicate(_) |
- &hir::WherePredicate::RegionPredicate(_) => {}
- &hir::WherePredicate::EqPredicate(ref eq_pred) => {
+ &ast::WherePredicate::BoundPredicate(_) |
+ &ast::WherePredicate::RegionPredicate(_) => {}
+ &ast::WherePredicate::EqPredicate(ref eq_pred) => {
self.resolve_path(eq_pred.id, &eq_pred.path, 0, TypeNS).and_then(|path_res| {
if let PathResolution { base_def: Def::TyParam(..), .. } = path_res {
Ok(self.record_def(eq_pred.id, path_res))
}
}
}
- intravisit::walk_generics(self, generics);
+ visit::walk_generics(self, generics);
}
fn with_current_self_type<T, F>(&mut self, self_type: &Ty, f: F) -> T
} else {
self.record_def(trait_ref.ref_id, err_path_resolution());
}
- intravisit::walk_trait_ref(self, trait_ref);
+ visit::walk_trait_ref(self, trait_ref);
}
let original_trait_ref = replace(&mut self.current_trait_ref, new_val);
let result = f(self, new_id);
// Resolve the self type.
this.visit_ty(self_type);
- this.with_self_rib(Def::SelfTy(trait_id, Some((item_id, self_type.id))), |this| {
+ this.with_self_rib(Def::SelfTy(trait_id, Some(item_id)), |this| {
this.with_current_self_type(self_type, |this| {
for impl_item in impl_items {
this.resolve_visibility(&impl_item.vis);
match impl_item.node {
- hir::ImplItemKind::Const(..) => {
+ ImplItemKind::Const(..) => {
// If this is a trait impl, ensure the const
// exists in trait
- this.check_trait_item(impl_item.name,
+ this.check_trait_item(impl_item.ident.name,
impl_item.span,
|n, s| ResolutionError::ConstNotMemberOfTrait(n, s));
this.with_constant_rib(|this| {
- intravisit::walk_impl_item(this, impl_item);
+ visit::walk_impl_item(this, impl_item);
});
}
- hir::ImplItemKind::Method(ref sig, _) => {
+ ImplItemKind::Method(ref sig, _) => {
// If this is a trait impl, ensure the method
// exists in trait
- this.check_trait_item(impl_item.name,
+ this.check_trait_item(impl_item.ident.name,
impl_item.span,
|n, s| ResolutionError::MethodNotMemberOfTrait(n, s));
FnSpace,
MethodRibKind);
this.with_type_parameter_rib(type_parameters, |this| {
- intravisit::walk_impl_item(this, impl_item);
+ visit::walk_impl_item(this, impl_item);
});
}
- hir::ImplItemKind::Type(ref ty) => {
+ ImplItemKind::Type(ref ty) => {
// If this is a trait impl, ensure the type
// exists in trait
- this.check_trait_item(impl_item.name,
+ this.check_trait_item(impl_item.ident.name,
impl_item.span,
|n, s| ResolutionError::TypeNotMemberOfTrait(n, s));
this.visit_ty(ty);
}
+ ImplItemKind::Macro(_) => panic!("unexpanded macro in resolve!"),
}
}
});
// that expands into an or-pattern where one 'x' was from the
// user and one 'x' came from the macro.
fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap {
- let mut result = HashMap::new();
- pat_bindings(&self.def_map, pat, |binding_mode, _id, sp, path1| {
- let name = path1.node;
- result.insert(name,
- BindingInfo {
- span: sp,
- binding_mode: binding_mode,
- });
+ let mut binding_map = HashMap::new();
+
+ pat.walk(&mut |pat| {
+ if let PatKind::Ident(binding_mode, ident, ref sub_pat) = pat.node {
+ if sub_pat.is_some() || match self.def_map.get(&pat.id) {
+ Some(&PathResolution { base_def: Def::Local(..), .. }) => true,
+ _ => false,
+ } {
+ let binding_info = BindingInfo { span: ident.span, binding_mode: binding_mode };
+ binding_map.insert(mtwt::resolve(ident.node), binding_info);
+ }
+ }
+ true
});
- return result;
+
+ binding_map
}
// check that all of the arms in an or-pattern have exactly the
None => {
resolve_error(self,
p.span,
- ResolutionError::VariableNotBoundInPattern(key, i + 1));
+ ResolutionError::VariableNotBoundInPattern(key, 1, i + 1));
}
Some(binding_i) => {
if binding_0.binding_mode != binding_i.binding_mode {
if !map_0.contains_key(&key) {
resolve_error(self,
binding.span,
- ResolutionError::VariableNotBoundInParentPattern(key, i + 1));
+ ResolutionError::VariableNotBoundInPattern(key, i + 1, 1));
}
}
}
}
// Descend into the block.
- intravisit::walk_block(self, block);
+ visit::walk_block(self, block);
// Move back up.
self.current_module = orig_module;
fn resolve_type(&mut self, ty: &Ty) {
match ty.node {
- TyPath(ref maybe_qself, ref path) => {
+ TyKind::Path(ref maybe_qself, ref path) => {
let resolution = match self.resolve_possibly_assoc_item(ty.id,
maybe_qself.as_ref(),
path,
// `<T>::a::b::c` is resolved by typeck alone.
TypecheckRequired => {
// Resolve embedded types.
- intravisit::walk_ty(self, ty);
+ visit::walk_ty(self, ty);
return;
}
ResolveAttempt(resolution) => resolution,
_ => {}
}
// Resolve embedded types.
- intravisit::walk_ty(self, ty);
+ visit::walk_ty(self, ty);
}
fn resolve_pattern(&mut self,
// pattern that binds them
bindings_list: &mut HashMap<Name, NodeId>) {
let pat_id = pattern.id;
- pattern.walk(|pattern| {
+ pattern.walk(&mut |pattern| {
match pattern.node {
PatKind::Ident(binding_mode, ref path1, ref at_rhs) => {
// The meaning of PatKind::Ident with no type parameters
let const_ok = mode == RefutableMode && at_rhs.is_none();
let ident = path1.node;
- let renamed = ident.name;
+ let renamed = mtwt::resolve(ident);
match self.resolve_bare_identifier_pattern(ident, pattern.span) {
FoundStructOrEnumVariant(def) if const_ok => {
BareIdentifierPatternUnresolved => {
debug!("(resolving pattern) binding `{}`", renamed);
- let def_id = self.ast_map.local_def_id(pattern.id);
+ let def_id = self.definitions.local_def_id(pattern.id);
let def = Def::Local(def_id, pattern.id);
// Record the definition so that later passes
Def::Variant(..) | Def::Const(..) => {
self.record_def(pattern.id, path_res);
}
- Def::Static(did, _) => {
- resolve_error(&self,
- path.span,
- ResolutionError::StaticVariableReference(
- did, None));
+ Def::Static(..) => {
+ let segments = &path.segments;
+ let binding = if path.global {
+ self.resolve_crate_relative_path(path.span, segments, ValueNS)
+ } else {
+ self.resolve_module_relative_path(path.span, segments, ValueNS)
+ }.unwrap();
+
+ let error = ResolutionError::StaticVariableReference(binding);
+ resolve_error(self, path.span, error);
self.record_def(pattern.id, err_path_resolution());
}
_ => {
);
self.record_def(pattern.id, err_path_resolution());
}
- intravisit::walk_path(self, path);
+ visit::walk_path(self, path);
}
PatKind::QPath(ref qself, ref path) => {
.name;
let traits = self.get_traits_containing_item(const_name);
self.trait_map.insert(pattern.id, traits);
- intravisit::walk_pat(self, pattern);
+ visit::walk_pat(self, pattern);
return true;
}
ResolveAttempt(resolution) => resolution,
.as_str()));
self.record_def(pattern.id, err_path_resolution());
}
- intravisit::walk_pat(self, pattern);
+ visit::walk_pat(self, pattern);
}
PatKind::Struct(ref path, _, _) => {
self.record_def(pattern.id, err_path_resolution());
}
}
- intravisit::walk_path(self, path);
+ visit::walk_path(self, path);
}
PatKind::Lit(_) | PatKind::Range(..) => {
- intravisit::walk_pat(self, pattern);
+ visit::walk_pat(self, pattern);
}
_ => {
});
}
- fn resolve_bare_identifier_pattern(&mut self, ident: hir::Ident, span: Span)
+ fn resolve_bare_identifier_pattern(&mut self, ident: ast::Ident, span: Span)
-> BareIdentifierPatternResolution {
- match self.resolve_ident_in_lexical_scope(ident, ValueNS, true)
- .map(LexicalScopeBinding::def) {
- Some(def @ Def::Variant(..)) | Some(def @ Def::Struct(..)) => {
- FoundStructOrEnumVariant(def)
- }
- Some(def @ Def::Const(..)) | Some(def @ Def::AssociatedConst(..)) => {
- FoundConst(def, ident.unhygienic_name)
- }
- Some(Def::Static(did, _)) => {
- resolve_error(self, span, ResolutionError::StaticVariableReference(
- did, Some(ident.unhygienic_name)));
+ let binding = match self.resolve_ident_in_lexical_scope(ident, ValueNS, true) {
+ Some(LexicalScopeBinding::Item(binding)) => binding,
+ _ => return BareIdentifierPatternUnresolved,
+ };
+ let def = binding.def().unwrap();
+
+ match def {
+ Def::Variant(..) | Def::Struct(..) => FoundStructOrEnumVariant(def),
+ Def::Const(..) | Def::AssociatedConst(..) => FoundConst(def, ident.name),
+ Def::Static(..) => {
+ let error = ResolutionError::StaticVariableReference(binding);
+ resolve_error(self, span, error);
BareIdentifierPatternUnresolved
}
_ => BareIdentifierPatternUnresolved,
/// Handles paths that may refer to associated items
fn resolve_possibly_assoc_item(&mut self,
id: NodeId,
- maybe_qself: Option<&hir::QSelf>,
+ maybe_qself: Option<&ast::QSelf>,
path: &Path,
namespace: Namespace)
-> AssocItemResolveResult {
None | Some(LocalDef{def: Def::Mod(..), ..}) if namespace == TypeNS =>
this.primitive_type_table
.primitive_types
- .get(&last_ident.unhygienic_name)
+ .get(&last_ident.name)
.map_or(def, |prim_ty| Some(LocalDef::from_def(Def::PrimTy(*prim_ty)))),
_ => def
}
// Resolve a single identifier
fn resolve_identifier(&mut self,
- identifier: hir::Ident,
+ identifier: ast::Ident,
namespace: Namespace,
record_used: bool)
-> Option<LocalDef> {
- if identifier.unhygienic_name == keywords::Invalid.name() {
+ if identifier.name == keywords::Invalid.name() {
return Some(LocalDef::from_def(Def::Err));
}
}
ClosureRibKind(function_id) => {
let prev_def = def;
- let node_def_id = self.ast_map.local_def_id(node_id);
+ let node_def_id = self.definitions.local_def_id(node_id);
let seen = self.freevars_seen
.entry(function_id)
// resolve a "module-relative" path, e.g. a::b::c
fn resolve_module_relative_path(&mut self,
span: Span,
- segments: &[hir::PathSegment],
+ segments: &[ast::PathSegment],
namespace: Namespace)
-> Result<&'a NameBinding<'a>,
bool /* true if an error was reported */> {
/// Invariant: This must be called only during main resolution, not during
/// import resolution.
- fn resolve_crate_relative_path(&mut self,
- span: Span,
- segments: &[hir::PathSegment],
- namespace: Namespace)
- -> Result<&'a NameBinding<'a>,
- bool /* true if an error was reported */> {
- let module_path = segments.split_last()
- .unwrap()
- .1
- .iter()
- .map(|ps| ps.identifier.name)
- .collect::<Vec<_>>();
-
+ fn resolve_crate_relative_path<T>(&mut self, span: Span, segments: &[T], namespace: Namespace)
+ -> Result<&'a NameBinding<'a>,
+ bool /* true if an error was reported */>
+ where T: Named,
+ {
+ let module_path = segments.split_last().unwrap().1.iter().map(T::name).collect::<Vec<_>>();
let root_module = self.graph_root;
let containing_module;
}
}
- let name = segments.last().unwrap().identifier.name;
+ let name = segments.last().unwrap().name();
let result = self.resolve_name_in_module(containing_module, name, namespace, false, true);
result.success().map(|binding| {
self.check_privacy(name, binding, span);
fn find_fallback_in_self_type(&mut self, name: Name) -> FallbackSuggestion {
fn extract_node_id(t: &Ty) -> Option<NodeId> {
match t.node {
- TyPath(None, _) => Some(t.id),
- TyRptr(_, ref mut_ty) => extract_node_id(&mut_ty.ty),
+ TyKind::Path(None, _) => Some(t.id),
+ TyKind::Rptr(_, ref mut_ty) => extract_node_id(&mut_ty.ty),
// This doesn't handle the remaining `Ty` variants as they are not
// that commonly the self_type, it might be interesting to provide
// support for those in future.
if let Some(node_id) = self.current_self_type.as_ref().and_then(extract_node_id) {
// Look for a field with the same name in the current self_type.
- match self.def_map.borrow().get(&node_id).map(|d| d.full_def()) {
+ match self.def_map.get(&node_id).map(|d| d.full_def()) {
Some(Def::Enum(did)) |
Some(Def::TyAlias(did)) |
Some(Def::Struct(did)) |
} SuggestionType::NotFound
}
+ fn resolve_labeled_block(&mut self, label: Option<ast::Ident>, id: NodeId, block: &Block) {
+ if let Some(label) = label {
+ let (label, def) = (mtwt::resolve(label), Def::Label(id));
+ self.with_label_rib(|this| {
+ this.label_ribs.last_mut().unwrap().bindings.insert(label, def);
+ this.visit_block(block);
+ });
+ } else {
+ self.visit_block(block);
+ }
+ }
+
fn resolve_expr(&mut self, expr: &Expr, parent: Option<&Expr>) {
// First, record candidate traits for this expression if it could
// result in the invocation of a method call.
// Next, resolve the node.
match expr.node {
- ExprPath(ref maybe_qself, ref path) => {
+ ExprKind::Path(ref maybe_qself, ref path) => {
let resolution = match self.resolve_possibly_assoc_item(expr.id,
maybe_qself.as_ref(),
path,
let method_name = path.segments.last().unwrap().identifier.name;
let traits = self.get_traits_containing_item(method_name);
self.trait_map.insert(expr.id, traits);
- intravisit::walk_expr(self, expr);
+ visit::walk_expr(self, expr);
return;
}
ResolveAttempt(resolution) => resolution,
}
}
- intravisit::walk_expr(self, expr);
+ visit::walk_expr(self, expr);
}
- ExprStruct(ref path, _, _) => {
+ ExprKind::Struct(ref path, _, _) => {
// Resolve the path to the structure it goes to. We don't
// check to ensure that the path is actually a structure; that
// is checked later during typeck.
}
}
- intravisit::walk_expr(self, expr);
+ visit::walk_expr(self, expr);
}
- ExprLoop(_, Some(label)) | ExprWhile(_, _, Some(label)) => {
+ ExprKind::Loop(_, Some(label)) | ExprKind::While(_, _, Some(label)) => {
self.with_label_rib(|this| {
let def = Def::Label(expr.id);
{
let rib = this.label_ribs.last_mut().unwrap();
- rib.bindings.insert(label.name, def);
+ rib.bindings.insert(mtwt::resolve(label), def);
}
- intravisit::walk_expr(this, expr);
+ visit::walk_expr(this, expr);
})
}
- ExprBreak(Some(label)) | ExprAgain(Some(label)) => {
- match self.search_label(label.node.name) {
+ ExprKind::Break(Some(label)) | ExprKind::Again(Some(label)) => {
+ match self.search_label(mtwt::resolve(label.node)) {
None => {
self.record_def(expr.id, err_path_resolution());
resolve_error(self,
}
}
}
- ExprField(ref subexpression, _) => {
+
+ ExprKind::IfLet(ref pattern, ref subexpression, ref if_block, ref optional_else) => {
+ self.visit_expr(subexpression);
+
+ self.value_ribs.push(Rib::new(NormalRibKind));
+ self.resolve_pattern(pattern, RefutableMode, &mut HashMap::new());
+ self.visit_block(if_block);
+ self.value_ribs.pop();
+
+ optional_else.as_ref().map(|expr| self.visit_expr(expr));
+ }
+
+ ExprKind::WhileLet(ref pattern, ref subexpression, ref block, label) => {
+ self.visit_expr(subexpression);
+ self.value_ribs.push(Rib::new(NormalRibKind));
+ self.resolve_pattern(pattern, RefutableMode, &mut HashMap::new());
+
+ self.resolve_labeled_block(label, expr.id, block);
+
+ self.value_ribs.pop();
+ }
+
+ ExprKind::ForLoop(ref pattern, ref subexpression, ref block, label) => {
+ self.visit_expr(subexpression);
+ self.value_ribs.push(Rib::new(NormalRibKind));
+ self.resolve_pattern(pattern, LocalIrrefutableMode, &mut HashMap::new());
+
+ self.resolve_labeled_block(label, expr.id, block);
+
+ self.value_ribs.pop();
+ }
+
+ ExprKind::Field(ref subexpression, _) => {
self.resolve_expr(subexpression, Some(expr));
}
- ExprMethodCall(_, ref types, ref arguments) => {
+ ExprKind::MethodCall(_, ref types, ref arguments) => {
let mut arguments = arguments.iter();
self.resolve_expr(arguments.next().unwrap(), Some(expr));
for argument in arguments {
}
_ => {
- intravisit::walk_expr(self, expr);
+ visit::walk_expr(self, expr);
}
}
}
fn record_candidate_traits_for_expr_if_necessary(&mut self, expr: &Expr) {
match expr.node {
- ExprField(_, name) => {
+ ExprKind::Field(_, name) => {
// FIXME(#6890): Even though you can't treat a method like a
// field, we need to add any trait methods we find that match
// the field name so that we can do some nice error reporting
// later on in typeck.
- let traits = self.get_traits_containing_item(name.node);
+ let traits = self.get_traits_containing_item(name.node.name);
self.trait_map.insert(expr.id, traits);
}
- ExprMethodCall(name, _, _) => {
+ ExprKind::MethodCall(name, _, _) => {
debug!("(recording candidate traits for expr) recording traits for {}",
expr.id);
- let traits = self.get_traits_containing_item(name.node);
+ let traits = self.get_traits_containing_item(name.node.name);
self.trait_map.insert(expr.id, traits);
}
_ => {
if let Some(def) = name_binding.def() {
if name == lookup_name && ns == namespace && filter_fn(def) {
// create the path
- let ident = hir::Ident::from_name(name);
+ let ident = ast::Ident::with_empty_ctxt(name);
let params = PathParameters::none();
let segment = PathSegment {
identifier: ident,
let span = name_binding.span;
let mut segms = path_segments.clone();
segms.push(segment);
- let segms = HirVec::from_vec(segms);
let path = Path {
span: span,
global: true,
NoParentLink => path_segments.clone(),
ModuleParentLink(_, name) => {
let mut paths = path_segments.clone();
- let ident = hir::Ident::from_name(name);
+ let ident = ast::Ident::with_empty_ctxt(name);
let params = PathParameters::none();
let segm = PathSegment {
identifier: ident,
fn record_def(&mut self, node_id: NodeId, resolution: PathResolution) {
debug!("(recording def) recording {:?} for {}", resolution, node_id);
- if let Some(prev_res) = self.def_map.borrow_mut().insert(node_id, resolution) {
- let span = self.ast_map.opt_span(node_id).unwrap_or(codemap::DUMMY_SP);
- span_bug!(span,
- "path resolved multiple times ({:?} before, {:?} now)",
- prev_res,
- resolution);
+ if let Some(prev_res) = self.def_map.insert(node_id, resolution) {
+ panic!("path resolved multiple times ({:?} before, {:?} now)", prev_res, resolution);
}
}
pat_binding_mode: BindingMode,
descr: &str) {
match pat_binding_mode {
- BindByValue(_) => {}
- BindByRef(..) => {
+ BindingMode::ByValue(_) => {}
+ BindingMode::ByRef(..) => {
resolve_error(self,
pat.span,
ResolutionError::CannotUseRefBindingModeWith(descr));
}
}
- fn resolve_visibility(&mut self, vis: &hir::Visibility) -> ty::Visibility {
+ fn resolve_visibility(&mut self, vis: &ast::Visibility) -> ty::Visibility {
let (path, id) = match *vis {
- hir::Public => return ty::Visibility::Public,
- hir::Visibility::Crate => return ty::Visibility::Restricted(ast::CRATE_NODE_ID),
- hir::Visibility::Restricted { ref path, id } => (path, id),
- hir::Inherited => {
+ ast::Visibility::Public => return ty::Visibility::Public,
+ ast::Visibility::Crate(_) => return ty::Visibility::Restricted(ast::CRATE_NODE_ID),
+ ast::Visibility::Restricted { ref path, id } => (path, id),
+ ast::Visibility::Inherited => {
let current_module =
self.get_nearest_normal_module_parent_or_self(self.current_module);
- let id = self.ast_map.as_local_node_id(current_module.def_id().unwrap()).unwrap();
+ let id =
+ self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap();
return ty::Visibility::Restricted(id);
}
};
Success(module) => {
let def = module.def.unwrap();
let path_resolution = PathResolution { base_def: def, depth: 0 };
- self.def_map.borrow_mut().insert(id, path_resolution);
- ty::Visibility::Restricted(self.ast_map.as_local_node_id(def.def_id()).unwrap())
+ self.def_map.insert(id, path_resolution);
+ ty::Visibility::Restricted(self.definitions.as_local_node_id(def.def_id()).unwrap())
}
Failed(Some((span, msg))) => {
self.session.span_err(span, &format!("failed to resolve module path. {}", msg));
fn is_accessible(&self, vis: ty::Visibility) -> bool {
let current_module = self.get_nearest_normal_module_parent_or_self(self.current_module);
- let node_id = self.ast_map.as_local_node_id(current_module.def_id().unwrap()).unwrap();
+ let node_id = self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap();
vis.is_accessible_from(node_id, self)
}
}
-pub struct CrateMap {
- pub def_map: RefCell<DefMap>,
- pub freevars: FreevarMap,
- pub maybe_unused_trait_imports: NodeSet,
- pub export_map: ExportMap,
- pub trait_map: TraitMap,
- pub glob_map: Option<GlobMap>,
-}
-
#[derive(PartialEq,Copy, Clone)]
pub enum MakeGlobMap {
Yes,
}
/// Entry point to crate resolution.
-pub fn resolve_crate<'a, 'tcx>(session: &'a Session,
- ast_map: &'a hir_map::Map<'tcx>,
- make_glob_map: MakeGlobMap)
- -> CrateMap {
+pub fn resolve_crate<'a, 'b>(resolver: &'b mut Resolver<'a>, krate: &'b Crate) {
// Currently, we ignore the name resolution data structures for
// the purposes of dependency tracking. Instead we will run name
// resolution and include its output in the hash of each item,
// reflects not just its contents but the results of name
// resolution on those contents. Hopefully we'll push this back at
// some point.
- let _task = ast_map.dep_graph.in_task(DepNode::Resolve);
-
- let krate = ast_map.krate();
- let arenas = Resolver::arenas();
- let mut resolver = create_resolver(session, ast_map, krate, make_glob_map, &arenas);
+ resolver.build_reduced_graph(krate);
+ resolve_imports::resolve_imports(resolver);
resolver.resolve_crate(krate);
- check_unused::check_crate(&mut resolver, krate);
+ check_unused::check_crate(resolver, krate);
resolver.report_privacy_errors();
-
- CrateMap {
- def_map: resolver.def_map,
- freevars: resolver.freevars,
- maybe_unused_trait_imports: resolver.maybe_unused_trait_imports,
- export_map: resolver.export_map,
- trait_map: resolver.trait_map,
- glob_map: if resolver.make_glob_map {
- Some(resolver.glob_map)
- } else {
- None
- },
- }
}
-/// Builds a name resolution walker.
-fn create_resolver<'a, 'tcx>(session: &'a Session,
- ast_map: &'a hir_map::Map<'tcx>,
- krate: &'a Crate,
- make_glob_map: MakeGlobMap,
- arenas: &'a ResolverArenas<'a>)
- -> Resolver<'a, 'tcx> {
- let mut resolver = Resolver::new(session, ast_map, make_glob_map, arenas);
-
- resolver.build_reduced_graph(krate);
-
- resolve_imports::resolve_imports(&mut resolver);
-
- resolver
+pub fn with_resolver<'a, T, F>(session: &'a Session,
+ definitions: &'a mut Definitions,
+ make_glob_map: MakeGlobMap,
+ f: F) -> T
+ where F: for<'b> FnOnce(Resolver<'b>) -> T,
+{
+ let arenas = Resolver::arenas();
+ let resolver = Resolver::new(session, definitions, make_glob_map, &arenas);
+ f(resolver)
}
__build_diagnostic_array! { librustc_resolve, DIAGNOSTICS }
help: String,
}
-struct ImportResolver<'a, 'b: 'a, 'tcx: 'b> {
- resolver: &'a mut Resolver<'b, 'tcx>,
+struct ImportResolver<'a, 'b: 'a> {
+ resolver: &'a mut Resolver<'b>,
}
-impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> {
+impl<'a, 'b:'a> ImportResolver<'a, 'b> {
// Import resolution
//
// This is a fixed-point algorithm. We resolve imports until our efforts
None => value_result.success().and_then(NameBinding::def).unwrap(),
};
let path_resolution = PathResolution { base_def: def, depth: 0 };
- self.resolver.def_map.borrow_mut().insert(directive.id, path_resolution);
+ self.resolver.def_map.insert(directive.id, path_resolution);
debug!("(resolving single import) successfully resolved import");
return Success(());
// Record the destination of this import
if let Some(did) = target_module.def_id() {
- self.resolver.def_map.borrow_mut().insert(directive.id,
- PathResolution {
- base_def: Def::Mod(did),
- depth: 0,
- });
+ let resolution = PathResolution { base_def: Def::Mod(did), depth: 0 };
+ self.resolver.def_map.insert(directive.id, resolution);
}
debug!("(resolving glob import) successfully resolved import");
if reexports.len() > 0 {
if let Some(def_id) = module.def_id() {
- let node_id = self.resolver.ast_map.as_local_node_id(def_id).unwrap();
+ let node_id = self.resolver.definitions.as_local_node_id(def_id).unwrap();
self.resolver.export_map.insert(node_id, reexports);
}
}
use std::io::Write;
-use rustc::hir::def_id::{DefId, DefIndex};
-use syntax::codemap::Span;
-
-use super::data::*;
+use super::external_data::*;
use super::dump::Dump;
-use super::span_utils::SpanUtils;
-pub struct CsvDumper<'tcx, 'b, W: 'b> {
- output: &'b mut W,
- span: SpanUtils<'tcx>
+pub struct CsvDumper<'b, W: 'b> {
+ output: &'b mut W
}
-impl<'a, 'b, W: Write> CsvDumper<'a, 'b, W> {
- pub fn new(writer: &'b mut W, span: SpanUtils<'a>) -> CsvDumper<'a, 'b, W> {
- CsvDumper { output: writer, span: span }
+impl<'b, W: Write> CsvDumper<'b, W> {
+ pub fn new(writer: &'b mut W) -> CsvDumper<'b, W> {
+ CsvDumper { output: writer }
}
- fn record(&mut self, kind: &str, span: Span, values: String) {
- let span_str = self.span.extent_str(span);
+ fn record(&mut self, kind: &str, span: SpanData, values: String) {
+ let span_str = span_extent_str(span);
if let Err(_) = write!(self.output, "{},{}{}\n", kind, span_str, values) {
error!("Error writing output");
}
}
}
-impl<'a, 'b, W: Write + 'b> Dump for CsvDumper<'a, 'b, W> {
+impl<'b, W: Write + 'b> Dump for CsvDumper<'b, W> {
fn crate_prelude(&mut self, data: CratePreludeData) {
let values = make_values_str(&[
("name", &data.crate_name),
}
fn enum_data(&mut self, data: EnumData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
}
fn extern_crate(&mut self, data: ExternCrateData) {
- let id = data.id.to_string();
+ let id = data.id.index.as_u32().to_string();
let crate_num = data.crate_num.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("name", &data.name),
let self_ref = data.self_ref.unwrap_or(null_def_id());
let trait_ref = data.trait_ref.unwrap_or(null_def_id());
- let id = data.id.to_string();
+ let id = data.id.index.as_u32().to_string();
let ref_id = self_ref.index.as_usize().to_string();
let ref_id_crate = self_ref.krate.to_string();
let trait_id = trait_ref.index.as_usize().to_string();
let trait_id_crate = trait_ref.krate.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("refid", &ref_id),
fn inheritance(&mut self, data: InheritanceData) {
let base_id = data.base_id.index.as_usize().to_string();
let base_crate = data.base_id.krate.to_string();
- let deriv_id = data.deriv_id.to_string();
- let deriv_crate = 0.to_string();
+ let deriv_id = data.deriv_id.index.as_u32().to_string();
+ let deriv_crate = data.deriv_id.krate.to_string();
let values = make_values_str(&[
("base", &base_id),
("basecrate", &base_crate),
None => (String::new(), String::new())
};
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
fn function_ref(&mut self, data: FunctionRefData) {
let ref_id = data.ref_id.index.as_usize().to_string();
let ref_crate = data.ref_id.krate.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &ref_id),
("refidcrate", &ref_crate),
let ref_id = data.ref_id.index.as_usize().to_string();
let ref_crate = data.ref_id.krate.to_string();
let qualname = String::new();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &ref_id),
("refidcrate", &ref_crate),
}
fn method(&mut self, data: MethodData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
let def_id = ref_id.index.as_usize().to_string();
let def_crate = ref_id.krate.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &def_id),
("refidcrate", &def_crate),
}
fn macro_use(&mut self, data: MacroUseData) {
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("callee_name", &data.name),
("qualname", &data.qualname),
}
fn mod_data(&mut self, data: ModData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
None => (0.to_string(), 0.to_string())
};
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &ref_id),
("refidcrate", &ref_crate),
}
fn struct_data(&mut self, data: StructData) {
- let id = data.id.to_string();
- let ctor_id = data.ctor_id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let ctor_id = data.ctor_id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("ctor_id", &ctor_id),
}
fn struct_variant(&mut self, data: StructVariantData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("ctor_id", &id),
}
fn trait_data(&mut self, data: TraitData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
}
fn tuple_variant(&mut self, data: TupleVariantData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("name", &data.name),
None => (0.to_string(), 0.to_string())
};
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &ref_id),
("refidcrate", &ref_crate),
}
fn typedef(&mut self, data: TypedefData) {
- let id = data.id.to_string();
+ let id = data.id.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("qualname", &data.qualname),
fn use_data(&mut self, data: UseData) {
let mod_id = data.mod_id.unwrap_or(null_def_id());
- let id = data.id.to_string();
+ let id = data.id.index.as_u32().to_string();
let ref_id = mod_id.index.as_usize().to_string();
let ref_crate = mod_id.krate.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("refid", &ref_id),
fn use_glob(&mut self, data: UseGlobData) {
let names = data.names.join(", ");
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("value", &names),
}
fn variable(&mut self, data: VariableData) {
- let id = data.id.to_string();
- let scope = data.scope.to_string();
+ let id = data.id.index.as_u32().to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("id", &id),
("name", &data.name),
fn variable_ref(&mut self, data: VariableRefData) {
let ref_id = data.ref_id.index.as_usize().to_string();
let ref_crate = data.ref_id.krate.to_string();
- let scope = data.scope.to_string();
+ let scope = data.scope.index.as_u32().to_string();
let values = make_values_str(&[
("refid", &ref_id),
("refidcrate", &ref_crate),
})
}
-fn null_def_id() -> DefId {
-    DefId {
-        krate: 0,
-        index: DefIndex::new(0),
-    }
+// Renders a SpanData as flat "key,value" CSV pairs for the csv dumper.
+// Note: the trailing `\` splices the string across lines (skipping the
+// following whitespace), so the comma after `byte_start,{}` is required —
+// without it the output fuses into `byte_start,<n>file_line_end`.
+fn span_extent_str(span: SpanData) -> String {
+    format!("file_name,\"{}\",file_line,{},file_col,{},byte_start,{},\
+            file_line_end,{},file_col_end,{},byte_end,{}",
+            span.file_name, span.line_start, span.column_start, span.byte_start,
+            span.line_end, span.column_end, span.byte_end)
 }
//! retrieve the data from a crate.
use rustc::hir::def_id::DefId;
-use rustc::ty;
use syntax::ast::{CrateNum, NodeId};
-use syntax::codemap::{Span, CodeMap};
-
-#[derive(Debug, Clone, RustcEncodable)]
-pub struct SpanData {
- file_name: String,
- byte_start: u32,
- byte_end: u32,
- /// 1-based.
- line_start: usize,
- line_end: usize,
- /// 1-based, character offset.
- column_start: usize,
- column_end: usize,
-}
-
-impl SpanData {
- pub fn from_span(span: Span, cm: &CodeMap) -> SpanData {
- let start = cm.lookup_char_pos(span.lo);
- let end = cm.lookup_char_pos(span.hi);
-
- SpanData {
- file_name: start.file.name.clone(),
- byte_start: span.lo.0,
- byte_end: span.hi.0,
- line_start: start.line,
- line_end: end.line,
- column_start: start.col.0 + 1,
- column_end: end.col.0 + 1,
- }
- }
-}
+use syntax::codemap::Span;
pub struct CrateData {
pub name: String,
pub scope: NodeId,
pub ref_id: DefId,
}
-
-// Emitted ids are used to cross-reference items across crates. DefIds and
-// NodeIds do not usually correspond in any way. The strategy is to use the
-// index from the DefId as a crate-local id. However, within a crate, DefId
-// indices and NodeIds can overlap. So, we must adjust the NodeIds. If an
-// item can be identified by a DefId as well as a NodeId, then we use the
-// DefId index as the id. If it can't, then we have to use the NodeId, but
-// need to adjust it so it will not clash with any possible DefId index.
-pub fn normalize_node_id<'a>(tcx: &ty::TyCtxt<'a>, id: NodeId) -> usize {
- match tcx.map.opt_local_def_id(id) {
- Some(id) => id.index.as_usize(),
- None => id as usize + tcx.map.num_local_def_ids()
- }
-}
-
-// Macro to implement a normalize() function (see below for usage)
-macro_rules! impl_normalize {
- ($($t:ty => $($field:ident),*);*) => {
- $(
- impl $t {
- pub fn normalize<'a>(mut self, tcx: &ty::TyCtxt<'a>) -> $t {
- $(
- self.$field = normalize_node_id(tcx, self.$field) as u32;
- )*
- self
- }
- }
- )*
- }
-}
-
-impl_normalize! {
- EnumData => id, scope;
- ExternCrateData => id, scope;
- FunctionCallData => scope;
- FunctionData => id, scope;
- FunctionRefData => scope;
- ImplData => id, scope;
- InheritanceData => deriv_id;
- MacroUseData => scope;
- MethodCallData => scope;
- MethodData => id, scope;
- ModData => id, scope;
- ModRefData => scope;
- StructData => ctor_id, id, scope;
- StructVariantData => id, scope;
- TupleVariantData => id, scope;
- TraitData => id, scope;
- TypedefData => id;
- TypeRefData => scope;
- UseData => id, scope;
- UseGlobData => id, scope;
- VariableData => id;
- VariableRefData => scope
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::data::*;
+use super::external_data::*;
pub trait Dump {
fn crate_prelude(&mut self, CratePreludeData) {}
use super::{escape, generated_code, SaveContext, PathCollector};
use super::data::*;
use super::dump::Dump;
+use super::external_data::Lower;
use super::span_utils::SpanUtils;
use super::recorder;
pub struct DumpVisitor<'l, 'tcx: 'l, 'll, D: 'll> {
save_ctxt: SaveContext<'l, 'tcx>,
sess: &'l Session,
- tcx: &'l TyCtxt<'tcx>,
+ tcx: TyCtxt<'l, 'tcx, 'tcx>,
analysis: &'l ty::CrateAnalysis<'l>,
dumper: &'ll mut D,
}
impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> {
- pub fn new(tcx: &'l TyCtxt<'tcx>,
+ pub fn new(tcx: TyCtxt<'l, 'tcx, 'tcx>,
save_ctxt: SaveContext<'l, 'tcx>,
analysis: &'l ty::CrateAnalysis<'l>,
dumper: &'ll mut D)
span: krate.span,
};
- self.dumper.crate_prelude(data);
+ self.dumper.crate_prelude(data.lower(self.tcx));
}
// Return all non-empty prefixes of a path.
qualname: qualname,
scope: self.cur_scope,
ref_id: None
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
qualname: qualname,
scope: self.cur_scope,
ref_id: None
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
span: *span,
qualname: qualname.to_owned(),
scope: 0
- });
+ }.lower(self.tcx));
// write the other sub-paths
if len <= 2 {
qualname: qualname.to_owned(),
scope: self.cur_scope,
ref_id: None
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
ref_id: Some(def_id),
scope: scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Def::Struct(..) |
Def::Enum(..) |
ref_id: Some(def_id),
scope: scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Def::Static(_, _) |
Def::Const(_) |
ref_id: def_id,
scope: scope,
name: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Def::Fn(..) => {
self.dumper.function_ref(FunctionRefData {
span: sub_span.expect("No span found for fn ref"),
ref_id: def_id,
scope: scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Def::SelfTy(..) |
Def::Label(_) |
type_value: typ,
value: String::new(),
scope: 0
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
}
if body.is_some() {
if !self.span.filter_generated(Some(method_data.span), span) {
- self.dumper.function(method_data.clone().normalize(&self.tcx));
+ self.dumper.function(method_data.clone().lower(self.tcx));
}
self.process_formals(&sig.decl.inputs, &method_data.qualname);
} else {
span: method_data.span,
scope: method_data.scope,
qualname: method_data.qualname.clone(),
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
self.process_generic_params(&sig.generics, span, &method_data.qualname, id);
let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope);
if let Some(trait_ref_data) = trait_ref_data {
if !self.span.filter_generated(Some(trait_ref_data.span), trait_ref.path.span) {
- self.dumper.type_ref(trait_ref_data.normalize(&self.tcx));
+ self.dumper.type_ref(trait_ref_data.lower(self.tcx));
}
visit::walk_path(self, &trait_ref.path);
let field_data = self.save_ctxt.get_field_data(field, parent_id);
if let Some(mut field_data) = field_data {
if !self.span.filter_generated(Some(field_data.span), field.span) {
- field_data.scope = normalize_node_id(&self.tcx, field_data.scope) as u32;
field_data.value = String::new();
- self.dumper.variable(field_data.normalize(&self.tcx));
+ self.dumper.variable(field_data.lower(self.tcx));
}
}
}
id: param.id,
qualname: name,
value: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
self.visit_generics(generics);
if let Some(fn_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(fn_data, FunctionData, item.span);
if !self.span.filter_generated(Some(fn_data.span), item.span) {
- self.dumper.function(fn_data.clone().normalize(&self.tcx));
+ self.dumper.function(fn_data.clone().lower(self.tcx));
}
self.process_formals(&decl.inputs, &fn_data.qualname);
if let Some(var_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(var_data, VariableData, item.span);
if !self.span.filter_generated(Some(var_data.span), item.span) {
- let mut var_data = var_data;
- var_data.scope = normalize_node_id(&self.tcx, var_data.scope) as u32;
- self.dumper.variable(var_data.normalize(&self.tcx));
+ self.dumper.variable(var_data.lower(self.tcx));
}
}
self.visit_ty(&typ);
qualname: qualname,
value: self.span.snippet(expr.span),
type_value: ty_to_string(&typ),
- scope: normalize_node_id(&self.tcx, self.cur_scope) as u32
- }.normalize(&self.tcx));
+ scope: self.cur_scope
+ }.lower(self.tcx));
}
// walk type and init value
qualname: qualname.clone(),
scope: self.cur_scope,
value: val
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Some(data) => data,
};
down_cast_data!(enum_data, EnumData, item.span);
- let normalized = enum_data.clone().normalize(&self.tcx);
- if !self.span.filter_generated(Some(normalized.span), item.span) {
- self.dumper.enum_data(normalized);
+ if !self.span.filter_generated(Some(enum_data.span), item.span) {
+ self.dumper.enum_data(enum_data.clone().lower(self.tcx));
}
for variant in &enum_definition.variants {
type_value: enum_data.qualname.clone(),
value: val,
scope: enum_data.scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
_ => {
type_value: enum_data.qualname.clone(),
value: val,
scope: enum_data.scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
}
if let Some(ref self_ref) = impl_data.self_ref {
has_self_ref = true;
if !self.span.filter_generated(Some(self_ref.span), item.span) {
- self.dumper.type_ref(self_ref.clone().normalize(&self.tcx));
+ self.dumper.type_ref(self_ref.clone().lower(self.tcx));
}
}
if let Some(ref trait_ref_data) = impl_data.trait_ref {
if !self.span.filter_generated(Some(trait_ref_data.span), item.span) {
- self.dumper.type_ref(trait_ref_data.clone().normalize(&self.tcx));
+ self.dumper.type_ref(trait_ref_data.clone().lower(self.tcx));
}
visit::walk_path(self, &trait_ref.as_ref().unwrap().path);
scope: impl_data.scope,
trait_ref: impl_data.trait_ref.map(|d| d.ref_id.unwrap()),
self_ref: impl_data.self_ref.map(|d| d.ref_id.unwrap())
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
if !has_self_ref {
qualname: qualname.clone(),
scope: self.cur_scope,
value: val
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
// super-traits
ref_id: Some(id),
scope: self.cur_scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
if !self.span.filter_generated(sub_span, trait_ref.path.span) {
span: sub_span,
base_id: id,
deriv_id: item.id
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
}
if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(mod_data, ModData, item.span);
if !self.span.filter_generated(Some(mod_data.span), item.span) {
- self.dumper.mod_data(mod_data.normalize(&self.tcx));
+ self.dumper.mod_data(mod_data.lower(self.tcx));
}
}
}
ref_id: Some(vrd.ref_id),
scope: vrd.scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Some(recorder::FnRef) => {
self.dumper.function_ref(FunctionRefData {
span: vrd.span,
ref_id: vrd.ref_id,
scope: vrd.scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Some(recorder::ModRef) => {
self.dumper.mod_ref( ModRefData {
ref_id: Some(vrd.ref_id),
scope: vrd.scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
Some(recorder::VarRef) | None
- => self.dumper.variable_ref(vrd.normalize(&self.tcx))
+ => self.dumper.variable_ref(vrd.lower(self.tcx))
}
}
}
Data::TypeRefData(trd) => {
if !self.span.filter_generated(Some(trd.span), path.span) {
- self.dumper.type_ref(trd.normalize(&self.tcx));
+ self.dumper.type_ref(trd.lower(self.tcx));
}
}
Data::MethodCallData(mcd) => {
if !self.span.filter_generated(Some(mcd.span), path.span) {
- self.dumper.method_call(mcd.normalize(&self.tcx));
+ self.dumper.method_call(mcd.lower(self.tcx));
}
}
Data::FunctionCallData(fcd) => {
if !self.span.filter_generated(Some(fcd.span), path.span) {
- self.dumper.function_call(fcd.normalize(&self.tcx));
+ self.dumper.function_call(fcd.lower(self.tcx));
}
}
_ => {
if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
down_cast_data!(struct_lit_data, TypeRefData, ex.span);
if !self.span.filter_generated(Some(struct_lit_data.span), ex.span) {
- self.dumper.type_ref(struct_lit_data.normalize(&self.tcx));
+ self.dumper.type_ref(struct_lit_data.lower(self.tcx));
}
let scope = self.save_ctxt.enclosing_scope(ex.id);
.get_field_ref_data(field, variant, scope) {
if !self.span.filter_generated(Some(field_data.span), field.ident.span) {
- self.dumper.variable_ref(field_data.normalize(&self.tcx));
+ self.dumper.variable_ref(field_data.lower(self.tcx));
}
}
if let Some(mcd) = self.save_ctxt.get_expr_data(ex) {
down_cast_data!(mcd, MethodCallData, ex.span);
if !self.span.filter_generated(Some(mcd.span), ex.span) {
- self.dumper.method_call(mcd.normalize(&self.tcx));
+ self.dumper.method_call(mcd.lower(self.tcx));
}
}
ref_id: f.did,
scope: self.cur_scope,
name: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
self.visit_pat(&field.pat);
value: value,
type_value: typ,
scope: 0
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
}
span: sub_span,
name: data.name.clone(),
qualname: qualname.clone()
- });
+ }.lower(self.tcx));
}
}
if !self.mac_uses.contains(&data.span) {
scope: data.scope,
callee_span: data.callee_span,
imported: data.imported
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
}
mod_id: mod_id,
name: ident.to_string(),
scope: self.cur_scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
self.write_sub_paths_truncated(path, true);
}
id: item.id,
names: names,
scope: self.cur_scope
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
self.write_sub_paths(path, true);
}
location: location,
span: alias_span.expect("No span found for extern crate"),
scope: self.cur_scope,
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
Fn(ref decl, _, _, _, ref ty_params, ref body) =>
id: item.id,
qualname: qualname.clone(),
value: value
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
self.visit_ty(&ty);
ref_id: Some(id),
scope: self.cur_scope,
qualname: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
if let Some(field_data) = self.save_ctxt.get_expr_data(ex) {
down_cast_data!(field_data, VariableRefData, ex.span);
if !self.span.filter_generated(Some(field_data.span), ex.span) {
- self.dumper.variable_ref(field_data.normalize(&self.tcx));
+ self.dumper.variable_ref(field_data.lower(self.tcx));
}
}
}
ref_id: def.struct_variant().fields[idx.node].did,
scope: self.cur_scope,
name: String::new()
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
ty::TyTuple(_) => {}
value: value,
type_value: String::new(),
scope: 0
- }.normalize(&self.tcx));
+ }.lower(self.tcx));
}
}
Def::Variant(..) | Def::Enum(..) |
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::hir::def_id::{DefId, DefIndex};
+use rustc::hir::map::Map;
+use rustc::ty::TyCtxt;
+use syntax::ast::{CrateNum, NodeId};
+use syntax::codemap::{Span, CodeMap};
+
+use super::data;
+
+// FIXME: this should be pub(crate), but the current snapshot doesn't allow it yet
+// Converts an AST-keyed save-analysis struct (NodeIds + Spans) into its
+// serializable counterpart keyed by DefIds + SpanData.
+pub trait Lower {
+    type Target;
+    fn lower(self, tcx: TyCtxt) -> Self::Target;
+}
+
+// Maps a NodeId to its local DefId, falling back to the null_def_id()
+// sentinel when the id has no local def in the HIR map.
+fn make_def_id(id: NodeId, map: &Map) -> DefId {
+    map.opt_local_def_id(id).unwrap_or(null_def_id())
+}
+
+// Sentinel DefId (max-valued crate and index) meaning "no def id available".
+pub fn null_def_id() -> DefId {
+    DefId { krate: u32::max_value(), index: DefIndex::from_u32(u32::max_value()) }
+}
+
+// Serializable form of a Span: file/line/column coordinates resolved through
+// the CodeMap, so consumers of the dump don't need the compiler's span tables.
+#[derive(Clone, Debug, RustcEncodable)]
+pub struct SpanData {
+    pub file_name: String,
+    pub byte_start: u32,
+    pub byte_end: u32,
+    /// 1-based.
+    pub line_start: usize,
+    pub line_end: usize,
+    /// 1-based, character offset.
+    pub column_start: usize,
+    pub column_end: usize,
+}
+
+impl SpanData {
+    // Resolves the span's raw byte positions to line/column pairs via the
+    // CodeMap; columns are converted from 0-based to 1-based here.
+    pub fn from_span(span: Span, cm: &CodeMap) -> SpanData {
+        let start = cm.lookup_char_pos(span.lo);
+        let end = cm.lookup_char_pos(span.hi);
+
+        SpanData {
+            file_name: start.file.name.clone(),
+            byte_start: span.lo.0,
+            byte_end: span.hi.0,
+            line_start: start.line,
+            line_end: end.line,
+            column_start: start.col.0 + 1,
+            column_end: end.col.0 + 1,
+        }
+    }
+}
+
+// Crate-level header for the dump. Only the span needs lowering;
+// external_crates is reused directly from the `data` module.
+#[derive(Debug, RustcEncodable)]
+pub struct CratePreludeData {
+    pub crate_name: String,
+    pub crate_root: String,
+    pub external_crates: Vec<data::ExternalCrateData>,
+    pub span: SpanData,
+}
+
+impl Lower for data::CratePreludeData {
+    type Target = CratePreludeData;
+
+    fn lower(self, tcx: TyCtxt) -> CratePreludeData {
+        CratePreludeData {
+            crate_name: self.crate_name,
+            crate_root: self.crate_root,
+            external_crates: self.external_crates,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+        }
+    }
+}
+
+/// Data for enum declarations.
+#[derive(Clone, Debug, RustcEncodable)]
+pub struct EnumData {
+    pub id: DefId,
+    pub value: String,
+    pub qualname: String,
+    pub span: SpanData,
+    pub scope: DefId,
+}
+
+impl Lower for data::EnumData {
+    type Target = EnumData;
+
+    // Lowering maps the NodeId fields (id, scope) to DefIds and the Span to
+    // SpanData; string fields are moved through unchanged.
+    fn lower(self, tcx: TyCtxt) -> EnumData {
+        EnumData {
+            id: make_def_id(self.id, &tcx.map),
+            value: self.value,
+            qualname: self.qualname,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data for extern crates.
+#[derive(Debug, RustcEncodable)]
+pub struct ExternCrateData {
+    pub id: DefId,
+    pub name: String,
+    pub crate_num: CrateNum,
+    pub location: String,
+    pub span: SpanData,
+    pub scope: DefId,
+}
+
+impl Lower for data::ExternCrateData {
+    type Target = ExternCrateData;
+
+    fn lower(self, tcx: TyCtxt) -> ExternCrateData {
+        ExternCrateData {
+            id: make_def_id(self.id, &tcx.map),
+            name: self.name,
+            crate_num: self.crate_num,
+            location: self.location,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data about a function call.
+#[derive(Debug, RustcEncodable)]
+pub struct FunctionCallData {
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: DefId,
+}
+
+impl Lower for data::FunctionCallData {
+    type Target = FunctionCallData;
+
+    fn lower(self, tcx: TyCtxt) -> FunctionCallData {
+        FunctionCallData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            // ref_id is already a DefId in `data`; passed through unchanged.
+            ref_id: self.ref_id,
+        }
+    }
+}
+
+/// Data for all kinds of functions and methods.
+#[derive(Clone, Debug, RustcEncodable)]
+pub struct FunctionData {
+    pub id: DefId,
+    pub name: String,
+    pub qualname: String,
+    pub declaration: Option<DefId>,
+    pub span: SpanData,
+    pub scope: DefId,
+}
+
+impl Lower for data::FunctionData {
+    type Target = FunctionData;
+
+    fn lower(self, tcx: TyCtxt) -> FunctionData {
+        FunctionData {
+            id: make_def_id(self.id, &tcx.map),
+            name: self.name,
+            qualname: self.qualname,
+            declaration: self.declaration,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data about a reference to a function.
+#[derive(Debug, RustcEncodable)]
+pub struct FunctionRefData {
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: DefId,
+}
+
+impl Lower for data::FunctionRefData {
+    type Target = FunctionRefData;
+
+    fn lower(self, tcx: TyCtxt) -> FunctionRefData {
+        FunctionRefData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            ref_id: self.ref_id,
+        }
+    }
+}
+/// Data for an impl block.
+#[derive(Debug, RustcEncodable)]
+pub struct ImplData {
+    pub id: DefId,
+    pub span: SpanData,
+    pub scope: DefId,
+    pub trait_ref: Option<DefId>,
+    pub self_ref: Option<DefId>,
+}
+
+impl Lower for data::ImplData {
+    type Target = ImplData;
+
+    fn lower(self, tcx: TyCtxt) -> ImplData {
+        ImplData {
+            id: make_def_id(self.id, &tcx.map),
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            trait_ref: self.trait_ref,
+            self_ref: self.self_ref,
+        }
+    }
+}
+
+/// Data for an inheritance (base/derived) relationship between two defs.
+#[derive(Debug, RustcEncodable)]
+pub struct InheritanceData {
+    pub span: SpanData,
+    pub base_id: DefId,
+    pub deriv_id: DefId
+}
+
+impl Lower for data::InheritanceData {
+    type Target = InheritanceData;
+
+    fn lower(self, tcx: TyCtxt) -> InheritanceData {
+        InheritanceData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            // base_id is already a DefId; only deriv_id is a NodeId to map.
+            base_id: self.base_id,
+            deriv_id: make_def_id(self.deriv_id, &tcx.map)
+        }
+    }
+}
+
+/// Data about a macro declaration.
+#[derive(Debug, RustcEncodable)]
+pub struct MacroData {
+    pub span: SpanData,
+    pub name: String,
+    pub qualname: String,
+}
+
+impl Lower for data::MacroData {
+    type Target = MacroData;
+
+    fn lower(self, tcx: TyCtxt) -> MacroData {
+        MacroData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            name: self.name,
+            qualname: self.qualname,
+        }
+    }
+}
+
+/// Data about a macro use.
+#[derive(Debug, RustcEncodable)]
+pub struct MacroUseData {
+    pub span: SpanData,
+    pub name: String,
+    pub qualname: String,
+    // Because macro expansion happens before ref-ids are determined,
+    // we use the callee span to reference the associated macro definition.
+    pub callee_span: SpanData,
+    pub scope: DefId,
+    pub imported: bool,
+}
+
+impl Lower for data::MacroUseData {
+    type Target = MacroUseData;
+
+    fn lower(self, tcx: TyCtxt) -> MacroUseData {
+        MacroUseData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            name: self.name,
+            qualname: self.qualname,
+            // Both spans are lowered: the use site and the callee definition.
+            callee_span: SpanData::from_span(self.callee_span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            imported: self.imported,
+        }
+    }
+}
+
+/// Data about a method call.
+#[derive(Debug, RustcEncodable)]
+pub struct MethodCallData {
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: Option<DefId>,
+    pub decl_id: Option<DefId>,
+}
+
+impl Lower for data::MethodCallData {
+    type Target = MethodCallData;
+
+    fn lower(self, tcx: TyCtxt) -> MethodCallData {
+        MethodCallData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            ref_id: self.ref_id,
+            decl_id: self.decl_id,
+        }
+    }
+}
+
+/// Data for method declarations (methods with a body are treated as functions).
+#[derive(Clone, Debug, RustcEncodable)]
+pub struct MethodData {
+    pub id: DefId,
+    pub qualname: String,
+    pub span: SpanData,
+    pub scope: DefId,
+}
+
+impl Lower for data::MethodData {
+    type Target = MethodData;
+
+    fn lower(self, tcx: TyCtxt) -> MethodData {
+        MethodData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            id: make_def_id(self.id, &tcx.map),
+            qualname: self.qualname,
+        }
+    }
+}
+
+/// Data for modules.
+#[derive(Debug, RustcEncodable)]
+pub struct ModData {
+    pub id: DefId,
+    pub name: String,
+    pub qualname: String,
+    pub span: SpanData,
+    pub scope: DefId,
+    pub filename: String,
+}
+
+impl Lower for data::ModData {
+    type Target = ModData;
+
+    fn lower(self, tcx: TyCtxt) -> ModData {
+        ModData {
+            id: make_def_id(self.id, &tcx.map),
+            name: self.name,
+            qualname: self.qualname,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            filename: self.filename,
+        }
+    }
+}
+
+/// Data for a reference to a module.
+#[derive(Debug, RustcEncodable)]
+pub struct ModRefData {
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: Option<DefId>,
+    pub qualname: String
+}
+
+impl Lower for data::ModRefData {
+    type Target = ModRefData;
+
+    fn lower(self, tcx: TyCtxt) -> ModRefData {
+        ModRefData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            ref_id: self.ref_id,
+            qualname: self.qualname,
+        }
+    }
+}
+
+/// Data for a struct declaration.
+#[derive(Debug, RustcEncodable)]
+pub struct StructData {
+    pub span: SpanData,
+    pub id: DefId,
+    pub ctor_id: DefId,
+    pub qualname: String,
+    pub scope: DefId,
+    pub value: String
+}
+
+impl Lower for data::StructData {
+    type Target = StructData;
+
+    fn lower(self, tcx: TyCtxt) -> StructData {
+        StructData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            // Both the struct's own id and its constructor id are NodeIds.
+            id: make_def_id(self.id, &tcx.map),
+            ctor_id: make_def_id(self.ctor_id, &tcx.map),
+            qualname: self.qualname,
+            scope: make_def_id(self.scope, &tcx.map),
+            value: self.value
+        }
+    }
+}
+
+/// Data for a struct-style variant.
+#[derive(Debug, RustcEncodable)]
+pub struct StructVariantData {
+    pub span: SpanData,
+    pub id: DefId,
+    pub qualname: String,
+    pub type_value: String,
+    pub value: String,
+    pub scope: DefId
+}
+
+impl Lower for data::StructVariantData {
+    type Target = StructVariantData;
+
+    fn lower(self, tcx: TyCtxt) -> StructVariantData {
+        StructVariantData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            id: make_def_id(self.id, &tcx.map),
+            qualname: self.qualname,
+            type_value: self.type_value,
+            value: self.value,
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data for a trait declaration.
+#[derive(Debug, RustcEncodable)]
+pub struct TraitData {
+    pub span: SpanData,
+    pub id: DefId,
+    pub qualname: String,
+    pub scope: DefId,
+    pub value: String
+}
+
+impl Lower for data::TraitData {
+    type Target = TraitData;
+
+    fn lower(self, tcx: TyCtxt) -> TraitData {
+        TraitData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            id: make_def_id(self.id, &tcx.map),
+            qualname: self.qualname,
+            scope: make_def_id(self.scope, &tcx.map),
+            value: self.value,
+        }
+    }
+}
+
+/// Data for a tuple-style variant.
+#[derive(Debug, RustcEncodable)]
+pub struct TupleVariantData {
+    pub span: SpanData,
+    pub id: DefId,
+    pub name: String,
+    pub qualname: String,
+    pub type_value: String,
+    pub value: String,
+    pub scope: DefId,
+}
+
+impl Lower for data::TupleVariantData {
+    type Target = TupleVariantData;
+
+    fn lower(self, tcx: TyCtxt) -> TupleVariantData {
+        TupleVariantData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            id: make_def_id(self.id, &tcx.map),
+            name: self.name,
+            qualname: self.qualname,
+            type_value: self.type_value,
+            value: self.value,
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data for a typedef.
+#[derive(Debug, RustcEncodable)]
+pub struct TypedefData {
+    pub id: DefId,
+    pub span: SpanData,
+    pub qualname: String,
+    pub value: String,
+}
+
+impl Lower for data::TypedefData {
+    type Target = TypedefData;
+
+    fn lower(self, tcx: TyCtxt) -> TypedefData {
+        TypedefData {
+            id: make_def_id(self.id, &tcx.map),
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            qualname: self.qualname,
+            value: self.value,
+        }
+    }
+}
+
+/// Data for a reference to a type or trait.
+#[derive(Clone, Debug, RustcEncodable)]
+pub struct TypeRefData {
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: Option<DefId>,
+    pub qualname: String,
+}
+
+impl Lower for data::TypeRefData {
+    type Target = TypeRefData;
+
+    fn lower(self, tcx: TyCtxt) -> TypeRefData {
+        TypeRefData {
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            ref_id: self.ref_id,
+            qualname: self.qualname,
+        }
+    }
+}
+
+/// Data for a use item.
+#[derive(Debug, RustcEncodable)]
+pub struct UseData {
+    pub id: DefId,
+    pub span: SpanData,
+    pub name: String,
+    pub mod_id: Option<DefId>,
+    pub scope: DefId
+}
+
+impl Lower for data::UseData {
+    type Target = UseData;
+
+    fn lower(self, tcx: TyCtxt) -> UseData {
+        UseData {
+            id: make_def_id(self.id, &tcx.map),
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            name: self.name,
+            mod_id: self.mod_id,
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data for a glob import (use foo::*).
+#[derive(Debug, RustcEncodable)]
+pub struct UseGlobData {
+    pub id: DefId,
+    pub span: SpanData,
+    pub names: Vec<String>,
+    pub scope: DefId
+}
+
+impl Lower for data::UseGlobData {
+    type Target = UseGlobData;
+
+    fn lower(self, tcx: TyCtxt) -> UseGlobData {
+        UseGlobData {
+            id: make_def_id(self.id, &tcx.map),
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            names: self.names,
+            scope: make_def_id(self.scope, &tcx.map),
+        }
+    }
+}
+
+/// Data for local and global variables (consts and statics).
+#[derive(Debug, RustcEncodable)]
+pub struct VariableData {
+    pub id: DefId,
+    pub name: String,
+    pub qualname: String,
+    pub span: SpanData,
+    pub scope: DefId,
+    pub value: String,
+    pub type_value: String,
+}
+
+impl Lower for data::VariableData {
+    type Target = VariableData;
+
+    fn lower(self, tcx: TyCtxt) -> VariableData {
+        VariableData {
+            id: make_def_id(self.id, &tcx.map),
+            name: self.name,
+            qualname: self.qualname,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            value: self.value,
+            type_value: self.type_value,
+        }
+    }
+}
+
+/// Data for the use of some item (e.g., the use of a local variable, which
+/// will refer to that variables declaration (by ref_id)).
+#[derive(Debug, RustcEncodable)]
+pub struct VariableRefData {
+    pub name: String,
+    pub span: SpanData,
+    pub scope: DefId,
+    pub ref_id: DefId,
+}
+
+impl Lower for data::VariableRefData {
+    type Target = VariableRefData;
+
+    fn lower(self, tcx: TyCtxt) -> VariableRefData {
+        VariableRefData {
+            name: self.name,
+            span: SpanData::from_span(self.span, tcx.sess.codemap()),
+            scope: make_def_id(self.scope, &tcx.map),
+            // ref_id already identifies the declaration; passed through as-is.
+            ref_id: self.ref_id,
+        }
+    }
+}
use std::io::Write;
use rustc_serialize::json::as_json;
-use syntax::codemap::CodeMap;
-use syntax::ast::CrateNum;
-
-use super::data::{self, SpanData};
+use super::external_data::*;
use super::dump::Dump;
-pub struct JsonDumper<'a, 'b, W: Write + 'b> {
+// The dumper no longer stores a CodeMap: spans are lowered to SpanData
+// (via external_data::Lower) before data reaches it, dropping the 'a lifetime.
+pub struct JsonDumper<'b, W: Write + 'b> {
output: &'b mut W,
- codemap: &'a CodeMap,
first: bool,
}
-impl<'a, 'b, W: Write> JsonDumper<'a, 'b, W> {
- pub fn new(writer: &'b mut W, codemap: &'a CodeMap) -> JsonDumper<'a, 'b, W> {
+impl<'b, W: Write> JsonDumper<'b, W> {
+ // Writes the JSON array opener "["; the matching "]" is written in Drop.
+ pub fn new(writer: &'b mut W) -> JsonDumper<'b, W> {
if let Err(_) = write!(writer, "[") {
error!("Error writing output");
}
- JsonDumper { output: writer, codemap:codemap, first: true }
+ JsonDumper { output: writer, first: true }
}
}
-impl<'a, 'b, W: Write> Drop for JsonDumper<'a, 'b, W> {
+impl<'b, W: Write> Drop for JsonDumper<'b, W> {
fn drop(&mut self) {
if let Err(_) = write!(self.output, "]") {
error!("Error writing output");
macro_rules! impl_fn {
($fn_name: ident, $data_type: ident) => {
- fn $fn_name(&mut self, data: data::$data_type) {
+ fn $fn_name(&mut self, data: $data_type) {
if self.first {
self.first = false;
} else {
error!("Error writing output");
}
}
- let data = data.lower(self.codemap);
if let Err(_) = write!(self.output, "{}", as_json(&data)) {
error!("Error writing output '{}'", as_json(&data));
}
}
}
-impl<'a, 'b, W: Write + 'b> Dump for JsonDumper<'a, 'b, W> {
+impl<'b, W: Write + 'b> Dump for JsonDumper<'b, W> {
impl_fn!(crate_prelude, CratePreludeData);
impl_fn!(enum_data, EnumData);
impl_fn!(extern_crate, ExternCrateData);
impl_fn!(variable, VariableData);
impl_fn!(variable_ref, VariableRefData);
}
-
-trait Lower {
- type Target;
- fn lower(self, cm: &CodeMap) -> Self::Target;
-}
-
-pub type Id = u32;
-
-#[derive(Debug, RustcEncodable)]
-pub struct CratePreludeData {
- pub crate_name: String,
- pub crate_root: String,
- pub external_crates: Vec<data::ExternalCrateData>,
- pub span: SpanData,
-}
-
-impl Lower for data::CratePreludeData {
- type Target = CratePreludeData;
-
- fn lower(self, cm: &CodeMap) -> CratePreludeData {
- CratePreludeData {
- crate_name: self.crate_name,
- crate_root: self.crate_root,
- external_crates: self.external_crates,
- span: SpanData::from_span(self.span, cm),
- }
- }
-}
-
-/// Data for enum declarations.
-#[derive(Clone, Debug, RustcEncodable)]
-pub struct EnumData {
- pub id: Id,
- pub value: String,
- pub qualname: String,
- pub span: SpanData,
- pub scope: Id,
-}
-
-impl Lower for data::EnumData {
- type Target = EnumData;
-
- fn lower(self, cm: &CodeMap) -> EnumData {
- EnumData {
- id: self.id,
- value: self.value,
- qualname: self.qualname,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- }
- }
-}
-
-/// Data for extern crates.
-#[derive(Debug, RustcEncodable)]
-pub struct ExternCrateData {
- pub id: Id,
- pub name: String,
- pub crate_num: CrateNum,
- pub location: String,
- pub span: SpanData,
- pub scope: Id,
-}
-
-impl Lower for data::ExternCrateData {
- type Target = ExternCrateData;
-
- fn lower(self, cm: &CodeMap) -> ExternCrateData {
- ExternCrateData {
- id: self.id,
- name: self.name,
- crate_num: self.crate_num,
- location: self.location,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- }
- }
-}
-
-/// Data about a function call.
-#[derive(Debug, RustcEncodable)]
-pub struct FunctionCallData {
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Id,
-}
-
-impl Lower for data::FunctionCallData {
- type Target = FunctionCallData;
-
- fn lower(self, cm: &CodeMap) -> FunctionCallData {
- FunctionCallData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.index.as_u32(),
- }
- }
-}
-
-/// Data for all kinds of functions and methods.
-#[derive(Clone, Debug, RustcEncodable)]
-pub struct FunctionData {
- pub id: Id,
- pub name: String,
- pub qualname: String,
- pub declaration: Option<Id>,
- pub span: SpanData,
- pub scope: Id,
-}
-
-impl Lower for data::FunctionData {
- type Target = FunctionData;
-
- fn lower(self, cm: &CodeMap) -> FunctionData {
- FunctionData {
- id: self.id,
- name: self.name,
- qualname: self.qualname,
- declaration: self.declaration.map(|id| id.index.as_u32()),
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- }
- }
-}
-
-/// Data about a function call.
-#[derive(Debug, RustcEncodable)]
-pub struct FunctionRefData {
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Id,
-}
-
-impl Lower for data::FunctionRefData {
- type Target = FunctionRefData;
-
- fn lower(self, cm: &CodeMap) -> FunctionRefData {
- FunctionRefData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.index.as_u32(),
- }
- }
-}
-#[derive(Debug, RustcEncodable)]
-pub struct ImplData {
- pub id: Id,
- pub span: SpanData,
- pub scope: Id,
- pub trait_ref: Option<Id>,
- pub self_ref: Option<Id>,
-}
-
-impl Lower for data::ImplData {
- type Target = ImplData;
-
- fn lower(self, cm: &CodeMap) -> ImplData {
- ImplData {
- id: self.id,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- trait_ref: self.trait_ref.map(|id| id.index.as_u32()),
- self_ref: self.self_ref.map(|id| id.index.as_u32()),
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct InheritanceData {
- pub span: SpanData,
- pub base_id: Id,
- pub deriv_id: Id
-}
-
-impl Lower for data::InheritanceData {
- type Target = InheritanceData;
-
- fn lower(self, cm: &CodeMap) -> InheritanceData {
- InheritanceData {
- span: SpanData::from_span(self.span, cm),
- base_id: self.base_id.index.as_u32(),
- deriv_id: self.deriv_id
- }
- }
-}
-
-/// Data about a macro declaration.
-#[derive(Debug, RustcEncodable)]
-pub struct MacroData {
- pub span: SpanData,
- pub name: String,
- pub qualname: String,
-}
-
-impl Lower for data::MacroData {
- type Target = MacroData;
-
- fn lower(self, cm: &CodeMap) -> MacroData {
- MacroData {
- span: SpanData::from_span(self.span, cm),
- name: self.name,
- qualname: self.qualname,
- }
- }
-}
-
-/// Data about a macro use.
-#[derive(Debug, RustcEncodable)]
-pub struct MacroUseData {
- pub span: SpanData,
- pub name: String,
- pub qualname: String,
- // Because macro expansion happens before ref-ids are determined,
- // we use the callee span to reference the associated macro definition.
- pub callee_span: SpanData,
- pub scope: Id,
- pub imported: bool,
-}
-
-impl Lower for data::MacroUseData {
- type Target = MacroUseData;
-
- fn lower(self, cm: &CodeMap) -> MacroUseData {
- MacroUseData {
- span: SpanData::from_span(self.span, cm),
- name: self.name,
- qualname: self.qualname,
- callee_span: SpanData::from_span(self.callee_span, cm),
- scope: self.scope,
- imported: self.imported,
- }
- }
-}
-
-/// Data about a method call.
-#[derive(Debug, RustcEncodable)]
-pub struct MethodCallData {
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Option<Id>,
- pub decl_id: Option<Id>,
-}
-
-impl Lower for data::MethodCallData {
- type Target = MethodCallData;
-
- fn lower(self, cm: &CodeMap) -> MethodCallData {
- MethodCallData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.map(|id| id.index.as_u32()),
- decl_id: self.decl_id.map(|id| id.index.as_u32()),
- }
- }
-}
-
-/// Data for method declarations (methods with a body are treated as functions).
-#[derive(Clone, Debug, RustcEncodable)]
-pub struct MethodData {
- pub id: Id,
- pub qualname: String,
- pub span: SpanData,
- pub scope: Id,
-}
-
-impl Lower for data::MethodData {
- type Target = MethodData;
-
- fn lower(self, cm: &CodeMap) -> MethodData {
- MethodData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- id: self.id,
- qualname: self.qualname,
- }
- }
-}
-
-/// Data for modules.
-#[derive(Debug, RustcEncodable)]
-pub struct ModData {
- pub id: Id,
- pub name: String,
- pub qualname: String,
- pub span: SpanData,
- pub scope: Id,
- pub filename: String,
-}
-
-impl Lower for data::ModData {
- type Target = ModData;
-
- fn lower(self, cm: &CodeMap) -> ModData {
- ModData {
- id: self.id,
- name: self.name,
- qualname: self.qualname,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- filename: self.filename,
- }
- }
-}
-
-/// Data for a reference to a module.
-#[derive(Debug, RustcEncodable)]
-pub struct ModRefData {
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Option<Id>,
- pub qualname: String
-}
-
-impl Lower for data::ModRefData {
- type Target = ModRefData;
-
- fn lower(self, cm: &CodeMap) -> ModRefData {
- ModRefData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.map(|id| id.index.as_u32()),
- qualname: self.qualname,
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct StructData {
- pub span: SpanData,
- pub id: Id,
- pub ctor_id: Id,
- pub qualname: String,
- pub scope: Id,
- pub value: String
-}
-
-impl Lower for data::StructData {
- type Target = StructData;
-
- fn lower(self, cm: &CodeMap) -> StructData {
- StructData {
- span: SpanData::from_span(self.span, cm),
- id: self.id,
- ctor_id: self.ctor_id,
- qualname: self.qualname,
- scope: self.scope,
- value: self.value
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct StructVariantData {
- pub span: SpanData,
- pub id: Id,
- pub qualname: String,
- pub type_value: String,
- pub value: String,
- pub scope: Id
-}
-
-impl Lower for data::StructVariantData {
- type Target = StructVariantData;
-
- fn lower(self, cm: &CodeMap) -> StructVariantData {
- StructVariantData {
- span: SpanData::from_span(self.span, cm),
- id: self.id,
- qualname: self.qualname,
- type_value: self.type_value,
- value: self.value,
- scope: self.scope,
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct TraitData {
- pub span: SpanData,
- pub id: Id,
- pub qualname: String,
- pub scope: Id,
- pub value: String
-}
-
-impl Lower for data::TraitData {
- type Target = TraitData;
-
- fn lower(self, cm: &CodeMap) -> TraitData {
- TraitData {
- span: SpanData::from_span(self.span, cm),
- id: self.id,
- qualname: self.qualname,
- scope: self.scope,
- value: self.value,
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct TupleVariantData {
- pub span: SpanData,
- pub id: Id,
- pub name: String,
- pub qualname: String,
- pub type_value: String,
- pub value: String,
- pub scope: Id,
-}
-
-impl Lower for data::TupleVariantData {
- type Target = TupleVariantData;
-
- fn lower(self, cm: &CodeMap) -> TupleVariantData {
- TupleVariantData {
- span: SpanData::from_span(self.span, cm),
- id: self.id,
- name: self.name,
- qualname: self.qualname,
- type_value: self.type_value,
- value: self.value,
- scope: self.scope,
- }
- }
-}
-
-/// Data for a typedef.
-#[derive(Debug, RustcEncodable)]
-pub struct TypedefData {
- pub id: Id,
- pub span: SpanData,
- pub qualname: String,
- pub value: String,
-}
-
-impl Lower for data::TypedefData {
- type Target = TypedefData;
-
- fn lower(self, cm: &CodeMap) -> TypedefData {
- TypedefData {
- id: self.id,
- span: SpanData::from_span(self.span, cm),
- qualname: self.qualname,
- value: self.value,
- }
- }
-}
-
-/// Data for a reference to a type or trait.
-#[derive(Clone, Debug, RustcEncodable)]
-pub struct TypeRefData {
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Option<Id>,
- pub qualname: String,
-}
-
-impl Lower for data::TypeRefData {
- type Target = TypeRefData;
-
- fn lower(self, cm: &CodeMap) -> TypeRefData {
- TypeRefData {
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.map(|id| id.index.as_u32()),
- qualname: self.qualname,
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct UseData {
- pub id: Id,
- pub span: SpanData,
- pub name: String,
- pub mod_id: Option<Id>,
- pub scope: Id
-}
-
-impl Lower for data::UseData {
- type Target = UseData;
-
- fn lower(self, cm: &CodeMap) -> UseData {
- UseData {
- id: self.id,
- span: SpanData::from_span(self.span, cm),
- name: self.name,
- mod_id: self.mod_id.map(|id| id.index.as_u32()),
- scope: self.scope,
- }
- }
-}
-
-#[derive(Debug, RustcEncodable)]
-pub struct UseGlobData {
- pub id: Id,
- pub span: SpanData,
- pub names: Vec<String>,
- pub scope: Id
-}
-
-impl Lower for data::UseGlobData {
- type Target = UseGlobData;
-
- fn lower(self, cm: &CodeMap) -> UseGlobData {
- UseGlobData {
- id: self.id,
- span: SpanData::from_span(self.span, cm),
- names: self.names,
- scope: self.scope,
- }
- }
-}
-
-/// Data for local and global variables (consts and statics).
-#[derive(Debug, RustcEncodable)]
-pub struct VariableData {
- pub id: Id,
- pub name: String,
- pub qualname: String,
- pub span: SpanData,
- pub scope: Id,
- pub value: String,
- pub type_value: String,
-}
-
-impl Lower for data::VariableData {
- type Target = VariableData;
-
- fn lower(self, cm: &CodeMap) -> VariableData {
- VariableData {
- id: self.id,
- name: self.name,
- qualname: self.qualname,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- value: self.value,
- type_value: self.type_value,
- }
- }
-}
-
-/// Data for the use of some item (e.g., the use of a local variable, which
-/// will refer to that variables declaration (by ref_id)).
-#[derive(Debug, RustcEncodable)]
-pub struct VariableRefData {
- pub name: String,
- pub span: SpanData,
- pub scope: Id,
- pub ref_id: Id,
-}
-
-impl Lower for data::VariableRefData {
- type Target = VariableRefData;
-
- fn lower(self, cm: &CodeMap) -> VariableRefData {
- VariableRefData {
- name: self.name,
- span: SpanData::from_span(self.span, cm),
- scope: self.scope,
- ref_id: self.ref_id.index.as_u32(),
- }
- }
-}
#[macro_use] extern crate syntax;
extern crate serialize as rustc_serialize;
+mod csv_dumper;
+mod json_dumper;
+mod data;
+mod dump;
+mod dump_visitor;
+pub mod external_data;
+#[macro_use]
+pub mod span_utils;
+
use rustc::hir;
use rustc::hir::map::NodeItem;
use rustc::hir::def::Def;
use syntax::visit::{self, Visitor};
use syntax::print::pprust::ty_to_string;
-mod csv_dumper;
-mod json_dumper;
-mod data;
-mod dump;
-mod dump_visitor;
-#[macro_use]
-pub mod span_utils;
-
pub use self::csv_dumper::CsvDumper;
pub use self::json_dumper::JsonDumper;
pub use self::data::*;
}
pub struct SaveContext<'l, 'tcx: 'l> {
- tcx: &'l TyCtxt<'tcx>,
+ tcx: TyCtxt<'l, 'tcx, 'tcx>,
span_utils: SpanUtils<'tcx>,
}
);
impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> {
- pub fn new(tcx: &'l TyCtxt<'tcx>) -> SaveContext<'l, 'tcx> {
+ pub fn new(tcx: TyCtxt<'l, 'tcx, 'tcx>) -> SaveContext<'l, 'tcx> {
let span_utils = SpanUtils::new(&tcx.sess);
SaveContext::from_span_utils(tcx, span_utils)
}
- pub fn from_span_utils(tcx: &'l TyCtxt<'tcx>,
+ pub fn from_span_utils(tcx: TyCtxt<'l, 'tcx, 'tcx>,
span_utils: SpanUtils<'tcx>)
-> SaveContext<'l, 'tcx> {
SaveContext {
}
}
-pub fn process_crate<'l, 'tcx>(tcx: &'l TyCtxt<'tcx>,
+pub fn process_crate<'l, 'tcx>(tcx: TyCtxt<'l, 'tcx, 'tcx>,
krate: &ast::Crate,
analysis: &'l ty::CrateAnalysis<'l>,
cratename: &str,
root_path.pop();
let output = &mut output_file;
- let utils: SpanUtils<'tcx> = SpanUtils::new(&tcx.sess);
let save_ctxt = SaveContext::new(tcx);
macro_rules! dump {
}
match format {
- Format::Csv => dump!(CsvDumper::new(output, utils)),
- Format::Json => dump!(JsonDumper::new(output, utils.sess.codemap())),
+ Format::Csv => dump!(CsvDumper::new(output)),
+ Format::Json => dump!(JsonDumper::new(output)),
}
}
use syntax::ast;
use syntax::codemap::*;
-use syntax::parse::lexer;
-use syntax::parse::lexer::{Reader, StringReader};
-use syntax::parse::token;
-use syntax::parse::token::{keywords, Token};
+use syntax::parse::lexer::{self, Reader, StringReader};
+use syntax::parse::token::{self, keywords, Token};
#[derive(Clone)]
pub struct SpanUtils<'a> {
}
}
- // Standard string for extents/location.
- #[rustfmt_skip]
- pub fn extent_str(&self, span: Span) -> String {
- let lo_loc = self.sess.codemap().lookup_char_pos(span.lo);
- let hi_loc = self.sess.codemap().lookup_char_pos(span.hi);
- let lo_pos = self.sess.codemap().bytepos_to_file_charpos(span.lo);
- let hi_pos = self.sess.codemap().bytepos_to_file_charpos(span.hi);
- let lo_pos_byte = self.sess.codemap().lookup_byte_offset(span.lo).pos;
- let hi_pos_byte = self.sess.codemap().lookup_byte_offset(span.hi).pos;
-
- format!("file_name,\"{}\",file_line,{},file_col,{},extent_start,{},extent_start_bytes,{},\
- file_line_end,{},file_col_end,{},extent_end,{},extent_end_bytes,{}",
- SpanUtils::make_path_string(&lo_loc.file.name),
- lo_loc.line, lo_loc.col.to_usize(), lo_pos.to_usize(), lo_pos_byte.to_usize(),
- hi_loc.line, hi_loc.col.to_usize(), hi_pos.to_usize(), hi_pos_byte.to_usize())
- }
-
// sub_span starts at span.lo, so we need to adjust the positions etc.
// If sub_span is None, we don't need to adjust.
pub fn make_sub_span(&self, span: Span, sub_span: Option<Span>) -> Option<Span> {
use rustc::hir::def::{Def, DefMap};
use rustc::hir::def_id::DefId;
use middle::expr_use_visitor as euv;
-use rustc::infer;
use middle::lang_items::StrEqFnLangItem;
use middle::mem_categorization as mc;
use middle::mem_categorization::Categorization;
struct ConstantExpr<'a>(&'a hir::Expr);
impl<'a> ConstantExpr<'a> {
- fn eq(self, other: ConstantExpr<'a>, tcx: &TyCtxt) -> bool {
+ fn eq<'b, 'tcx>(self, other: ConstantExpr<'a>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
match compare_lit_exprs(tcx, self.0, other.0) {
Some(result) => result == Ordering::Equal,
None => bug!("compare_list_exprs: type mismatch"),
DebugLoc),
}
-impl<'a, 'tcx> Opt<'a, 'tcx> {
- fn eq(&self, other: &Opt<'a, 'tcx>, tcx: &TyCtxt<'tcx>) -> bool {
+impl<'a, 'b, 'tcx> Opt<'a, 'tcx> {
+ fn eq(&self, other: &Opt<'a, 'tcx>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
match (self, other) {
(&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx),
(&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => {
any_pat!(m, col, PatKind::Ref(..))
}
-fn any_irrefutable_adt_pat(tcx: &TyCtxt, m: &[Match], col: usize) -> bool {
+fn any_irrefutable_adt_pat(tcx: TyCtxt, m: &[Match], col: usize) -> bool {
m.iter().any(|br| {
let pat = br.pats[col];
match pat.node {
field: field,
reassigned: false
};
- {
- let infcx = infer::normalizing_infer_ctxt(bcx.tcx(),
- &bcx.tcx().tables,
- ProjectionMode::Any);
+ bcx.tcx().normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
visitor.walk_expr(body);
- }
+ });
rc.reassigned
}
let llmatch;
let trmode;
- let moves_by_default = variable_ty.moves_by_default(¶m_env, span);
+ let moves_by_default = variable_ty.moves_by_default(tcx, ¶m_env, span);
match bm {
hir::BindByValue(_) if !moves_by_default || reassigned =>
{
if dtor { DTOR_NEEDED } else { 0 }
}
-pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; }
-impl<'tcx> GetDtorType<'tcx> for TyCtxt<'tcx> {
- fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 }
+pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; }
+impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> {
+ fn dtor_type(self) -> Ty<'tcx> { self.types.u8 }
}
fn dtor_active(flag: u8) -> bool {
/// This represents the (GEP) indices to follow to get to the discriminant field
pub type DiscrField = Vec<usize>;
-fn find_discr_field_candidate<'tcx>(tcx: &TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- mut path: DiscrField) -> Option<DiscrField> {
+fn find_discr_field_candidate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ mut path: DiscrField)
+ -> Option<DiscrField> {
match ty.sty {
// Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
}
}
-fn get_cases<'tcx>(tcx: &TyCtxt<'tcx>,
- adt: ty::AdtDef<'tcx>,
- substs: &subst::Substs<'tcx>)
- -> Vec<Case<'tcx>> {
+fn get_cases<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ adt: ty::AdtDef<'tcx>,
+ substs: &subst::Substs<'tcx>)
+ -> Vec<Case<'tcx>> {
adt.variants.iter().map(|vi| {
let field_tys = vi.fields.iter().map(|field| {
monomorphize::field_ty(tcx, substs, field)
}
}
-pub fn ty_of_inttype<'tcx>(tcx: &TyCtxt<'tcx>, ity: IntType) -> Ty<'tcx> {
+pub fn ty_of_inttype<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ity: IntType) -> Ty<'tcx> {
match ity {
attr::SignedInt(t) => tcx.mk_mach_int(t),
attr::UnsignedInt(t) => tcx.mk_mach_uint(t)
}
-pub fn build_link_meta(tcx: &TyCtxt,
- name: &str)
- -> LinkMeta {
+pub fn build_link_meta<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ name: &str)
+ -> LinkMeta {
let r = LinkMeta {
crate_name: name.to_owned(),
crate_hash: tcx.calculate_krate_hash(),
use rustc::middle::cstore;
use rustc::hir::def_id::DefId;
-use rustc::ty::{self, TypeFoldable};
+use rustc::ty::{self, TyCtxt, TypeFoldable};
use rustc::ty::item_path::{ItemPathBuffer, RootMode};
use rustc::hir::map::definitions::{DefPath, DefPathData};
use syntax::parse::token::{self, InternedString};
use serialize::hex::ToHex;
-pub fn def_id_to_string<'tcx>(tcx: &ty::TyCtxt<'tcx>, def_id: DefId) -> String {
+pub fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> String {
let def_path = tcx.def_path(def_id);
def_path_to_string(tcx, &def_path)
}
-pub fn def_path_to_string<'tcx>(tcx: &ty::TyCtxt<'tcx>, def_path: &DefPath) -> String {
+pub fn def_path_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_path: &DefPath) -> String {
let mut s = String::with_capacity(def_path.data.len() * 16);
s.push_str(&tcx.crate_name(def_path.krate));
}
// Sanity check
- assert!(trans.modules.len() == sess.opts.cg.codegen_units);
+ assert!(trans.modules.len() == sess.opts.cg.codegen_units ||
+ sess.opts.debugging_opts.incremental.is_some());
let tm = create_target_machine(sess);
use llvm;
use rustc::cfg;
use rustc::hir::def_id::DefId;
-use rustc::infer;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
use rustc::hir::pat_util::simple_name;
use common::{type_is_immediate, type_is_zero_size, val_ty};
use common;
use consts;
-use context::SharedCrateContext;
+use context::{SharedCrateContext, CrateContextList};
use controlflow;
use datum;
use debuginfo::{self, DebugLoc, ToDebugLoc};
use meth;
use mir;
use monomorphize::{self, Instance};
-use partitioning::{self, PartitioningStrategy, InstantiationMode};
+use partitioning::{self, PartitioningStrategy, InstantiationMode, CodegenUnit};
use symbol_names_test;
use tvec;
use type_::Type;
}
}
-pub fn custom_coerce_unsize_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
+pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>)
-> CustomCoerceUnsized {
subst::VecPerParamSpace::empty());
let trait_ref = ty::Binder(ty::TraitRef {
- def_id: ccx.tcx().lang_items.coerce_unsized_trait().unwrap(),
- substs: ccx.tcx().mk_substs(trait_substs)
+ def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
+ substs: scx.tcx().mk_substs(trait_substs)
});
- match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+ match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
- ccx.tcx().custom_coerce_unsized_kind(impl_def_id)
+ scx.tcx().custom_coerce_unsized_kind(impl_def_id)
}
vtable => {
bug!("invalid CoerceUnsized vtable: {:?}", vtable);
}
}
-fn build_cfg(tcx: &TyCtxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
+fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ id: ast::NodeId)
+ -> (ast::NodeId, Option<cfg::CFG>) {
let blk = match tcx.map.find(id) {
Some(hir_map::NodeItem(i)) => {
match i.node {
// part of a larger expression that may have already partially-filled the
// return slot alloca. This can cause errors related to clean-up due to
// the clobbering of the existing value in the return slot.
-fn has_nested_returns(tcx: &TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
+fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
for index in cfg.graph.depth_traverse(cfg.entry) {
let n = cfg.graph.node_data(index);
match tcx.map.find(n.id()) {
closure_env: closure::ClosureEnv) {
ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
- if collector::collecting_debug_information(ccx) {
+ if collector::collecting_debug_information(ccx.shared()) {
ccx.record_translation_item_as_generated(TransItem::Fn(instance));
}
let _icx = push_ctxt("trans_closure");
- attributes::emit_uwtable(llfndecl, true);
+ if !ccx.sess().no_landing_pads() {
+ attributes::emit_uwtable(llfndecl, true);
+ }
debug!("trans_closure(..., {})", instance);
let fn_ty = ccx.tcx().lookup_item_type(def_id).ty;
let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fn_ty);
let sig = ccx.tcx().erase_late_bound_regions(fn_ty.fn_sig());
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
let abi = fn_ty.fn_abi();
trans_closure(ccx,
decl,
let ccx = bcx.fcx.ccx;
let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
let result_ty = sig.output.unwrap();
// Get location to store the result. If the user does not care about
let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let (arena, fcx): (TypedArena<_>, FunctionContext);
// `llval` is a translation of an item defined in a separate
// compilation unit. This only makes sense if there are at least
// two compilation units.
- assert!(ccx.sess().opts.cg.codegen_units > 1);
+ assert!(ccx.sess().opts.cg.codegen_units > 1 ||
+ ccx.sess().opts.debugging_opts.incremental.is_some());
// `llval` is a copy of something defined elsewhere, so use
// `AvailableExternallyLinkage` to avoid duplicating code in the
// output.
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
-fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<&str>) {
+fn internalize_symbols(cx: &CrateContextList, reachable: &HashSet<&str>) {
unsafe {
let mut declared = HashSet::new();
// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
-fn create_imps(cx: &SharedCrateContext) {
+fn create_imps(cx: &CrateContextList) {
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on 32-bit. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g. no extra
// underscores added in front).
- let prefix = if cx.sess().target.target.target_pointer_width == "32" {
+ let prefix = if cx.shared().sess().target.target.target_pointer_width == "32" {
"\x01__imp__"
} else {
"\x01__imp_"
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
-pub fn filter_reachable_ids(ccx: &SharedCrateContext) -> NodeSet {
- ccx.reachable().iter().map(|x| *x).filter(|id| {
+pub fn filter_reachable_ids(scx: &SharedCrateContext) -> NodeSet {
+ scx.reachable().iter().map(|x| *x).filter(|id| {
// First, only worry about nodes which have a symbol name
- ccx.item_symbols().borrow().contains_key(id)
+ scx.item_symbols().borrow().contains_key(id)
}).filter(|&id| {
// Next, we want to ignore some FFI functions that are not exposed from
// this crate. Reachable FFI functions can be lumped into two
//
// As a result, if this id is an FFI item (foreign item) then we only
// let it through if it's included statically.
- match ccx.tcx().map.get(id) {
+ match scx.tcx().map.get(id) {
hir_map::NodeForeignItem(..) => {
- ccx.sess().cstore.is_statically_included_foreign_item(id)
+ scx.sess().cstore.is_statically_included_foreign_item(id)
}
_ => true,
}
}).collect()
}
-pub fn trans_crate<'tcx>(tcx: &TyCtxt<'tcx>,
- mir_map: &MirMap<'tcx>,
- analysis: ty::CrateAnalysis)
- -> CrateTranslation {
+pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ mir_map: &MirMap<'tcx>,
+ analysis: ty::CrateAnalysis)
+ -> CrateTranslation {
let _task = tcx.dep_graph.in_task(DepNode::TransCrate);
// Be careful with this krate: obviously it gives access to the
tcx.sess.opts.debug_assertions
};
- let link_meta = link::build_link_meta(&tcx, name);
+ let link_meta = link::build_link_meta(tcx, name);
- let codegen_units = tcx.sess.opts.cg.codegen_units;
- let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
- codegen_units,
- tcx,
+ let shared_ccx = SharedCrateContext::new(tcx,
&mir_map,
export_map,
Sha256::new(),
check_overflow,
check_dropflag);
+ let codegen_units = collect_and_partition_translation_items(&shared_ccx);
+ let codegen_unit_count = codegen_units.len();
+ assert!(tcx.sess.opts.cg.codegen_units == codegen_unit_count ||
+ tcx.sess.opts.debugging_opts.incremental.is_some());
+
+ let crate_context_list = CrateContextList::new(&shared_ccx, codegen_units);
+
{
- let ccx = shared_ccx.get_ccx(0);
- collect_translation_items(&ccx);
+ let ccx = crate_context_list.get_ccx(0);
// Translate all items. See `TransModVisitor` for
// details on why we walk in this particular way.
krate.visit_all_items(&mut TransModVisitor { ccx: &ccx });
}
- collector::print_collection_results(&ccx);
+ collector::print_collection_results(ccx.shared());
symbol_names_test::report_symbol_names(&ccx);
}
- for ccx in shared_ccx.iter() {
+ for ccx in crate_context_list.iter() {
if ccx.sess().opts.debuginfo != NoDebugInfo {
debuginfo::finalize(&ccx);
}
}
}
- let modules = shared_ccx.iter()
+ let modules = crate_context_list.iter()
.map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
.collect();
}
}
- if codegen_units > 1 {
- internalize_symbols(&shared_ccx,
+ if codegen_unit_count > 1 {
+ internalize_symbols(&crate_context_list,
&reachable_symbols.iter().map(|x| &x[..]).collect());
}
if sess.target.target.options.is_like_msvc &&
sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
- create_imps(&shared_ccx);
+ create_imps(&crate_context_list);
}
let metadata_module = ModuleTranslation {
}
}
-fn collect_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
- let time_passes = ccx.sess().time_passes();
+fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
+ -> Vec<CodegenUnit<'tcx>> {
+ let time_passes = scx.sess().time_passes();
- let collection_mode = match ccx.sess().opts.debugging_opts.print_trans_items {
+ let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items {
Some(ref s) => {
let mode_string = s.to_lowercase();
let mode_string = mode_string.trim();
let message = format!("Unknown codegen-item collection mode '{}'. \
Falling back to 'lazy' mode.",
mode_string);
- ccx.sess().warn(&message);
+ scx.sess().warn(&message);
}
TransItemCollectionMode::Lazy
};
let (items, reference_map) = time(time_passes, "translation item collection", || {
- collector::collect_crate_translation_items(&ccx, collection_mode)
+ collector::collect_crate_translation_items(scx, collection_mode)
});
- let strategy = if ccx.sess().opts.debugging_opts.incremental.is_some() {
+ let strategy = if scx.sess().opts.debugging_opts.incremental.is_some() {
PartitioningStrategy::PerModule
} else {
- PartitioningStrategy::FixedUnitCount(ccx.sess().opts.cg.codegen_units)
+ PartitioningStrategy::FixedUnitCount(scx.sess().opts.cg.codegen_units)
};
let codegen_units = time(time_passes, "codegen unit partitioning", || {
- partitioning::partition(ccx.tcx(),
+ partitioning::partition(scx.tcx(),
items.iter().cloned(),
strategy,
&reference_map)
});
- if ccx.sess().opts.debugging_opts.print_trans_items.is_some() {
+ if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
let mut item_to_cgus = HashMap::new();
- for cgu in codegen_units {
- for (trans_item, linkage) in cgu.items {
+ for cgu in &codegen_units {
+ for (&trans_item, &linkage) in &cgu.items {
item_to_cgus.entry(trans_item)
.or_insert(Vec::new())
.push((cgu.name.clone(), linkage));
let mut item_keys: Vec<_> = items
.iter()
.map(|i| {
- let mut output = i.to_string(ccx);
+ let mut output = i.to_string(scx.tcx());
output.push_str(" @@");
let mut empty = Vec::new();
let mut cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
println!("TRANS_ITEM {}", item);
}
- let mut ccx_map = ccx.translation_items().borrow_mut();
+ let mut ccx_map = scx.translation_items().borrow_mut();
for cgi in items {
ccx_map.insert(cgi, TransItemState::PredictedButNotGenerated);
}
}
+
+ codegen_units
}
use llvm::{self, ValueRef, get_params};
use middle::cstore::LOCAL_CRATE;
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::ty::subst;
use rustc::traits;
use rustc::hir::map as hir_map;
/// Trait or impl method.
pub fn method<'blk>(bcx: Block<'blk, 'tcx>,
method: ty::MethodCallee<'tcx>) -> Callee<'tcx> {
- let substs = bcx.tcx().mk_substs(bcx.fcx.monomorphize(&method.substs));
+ let substs = bcx.fcx.monomorphize(&method.substs);
Callee::def(bcx.ccx(), method.def_id, substs)
}
let method_item = tcx.impl_or_trait_item(def_id);
let trait_id = method_item.container().id();
let trait_ref = ty::Binder(substs.to_trait_ref(tcx, trait_id));
- let trait_ref = infer::normalize_associated_type(tcx, &trait_ref);
- match common::fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+ let trait_ref = tcx.normalize_associated_type(&trait_ref);
+ match common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(vtable_impl) => {
let impl_did = vtable_impl.impl_def_id;
let mname = tcx.item_name(def_id);
let method_ty = def_ty(tcx, def_id, substs);
let fn_ptr_ty = match method_ty.sty {
- ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
+ ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty),
_ => bug!("expected fn item type, found {}",
method_ty)
};
let method_ty = def_ty(tcx, def_id, substs);
let fn_ptr_ty = match method_ty.sty {
- ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
+ ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty),
_ => bug!("expected fn item type, found {}",
method_ty)
};
}
traits::VtableObject(ref data) => {
Callee {
- data: Virtual(traits::get_vtable_index_of_object_method(
- tcx, data, def_id)),
+ data: Virtual(tcx.get_vtable_index_of_object_method(data, def_id)),
ty: def_ty(tcx, def_id, substs)
}
}
extra_args: &[Ty<'tcx>]) -> FnType {
let abi = self.ty.fn_abi();
let sig = ccx.tcx().erase_late_bound_regions(self.ty.fn_sig());
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
let mut fn_ty = FnType::unadjusted(ccx, abi, &sig, extra_args);
if let Virtual(_) = self.data {
// Don't pass the vtable, it's not an argument of the virtual fn.
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
-> Datum<'tcx, Rvalue> {
let fn_ptr_ty = match self.ty.sty {
- ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)),
+ ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f),
_ => self.ty
};
match self.data {
}
/// Given a DefId and some Substs, produces the monomorphic item type.
-fn def_ty<'tcx>(tcx: &TyCtxt<'tcx>,
- def_id: DefId,
- substs: &'tcx subst::Substs<'tcx>)
- -> Ty<'tcx> {
+fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ substs: &'tcx subst::Substs<'tcx>)
+ -> Ty<'tcx> {
let ty = tcx.lookup_item_type(def_id).ty;
monomorphize::apply_param_substs(tcx, substs, &ty)
}
}
};
let sig = tcx.erase_late_bound_regions(sig);
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
let sig = ty::FnSig {
inputs: vec![bare_fn_ty_maybe_ref,
variadic: false
};
let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
- let tuple_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+ let tuple_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::RustCall,
sig: ty::Binder(sig)
- });
+ }));
debug!("tuple_fn_ty: {:?}", tuple_fn_ty);
//
// def_id to the local id of the inlined copy.
let def_id = inline::maybe_instantiate_inline(ccx, def_id);
- fn is_named_tuple_constructor(tcx: &TyCtxt, def_id: DefId) -> bool {
+ fn is_named_tuple_constructor(tcx: TyCtxt, def_id: DefId) -> bool {
let node_id = match tcx.map.as_local_node_id(def_id) {
Some(n) => n,
None => { return false; }
let fn_ptr_ty = match fn_ty.sty {
ty::TyFnDef(_, _, fty) => {
// Create a fn pointer with the substituted signature.
- tcx.mk_ty(ty::TyFnPtr(fty))
+ tcx.mk_fn_ptr(fty)
}
_ => bug!("expected fn item type, found {}", fn_ty)
};
// Find the actual function pointer.
let ty = ccx.tcx().lookup_item_type(def_id).ty;
let fn_ptr_ty = match ty.sty {
- ty::TyFnDef(_, _, fty) => {
+ ty::TyFnDef(_, _, ref fty) => {
// Create a fn pointer with the normalized signature.
- tcx.mk_fn_ptr(infer::normalize_associated_type(tcx, fty))
+ tcx.mk_fn_ptr(tcx.normalize_associated_type(fty))
}
_ => bug!("expected fn item type, found {}", ty)
};
let abi = callee.ty.fn_abi();
let sig = callee.ty.fn_sig();
let output = bcx.tcx().erase_late_bound_regions(&sig.output());
- let output = infer::normalize_associated_type(bcx.tcx(), &output);
+ let output = bcx.tcx().normalize_associated_type(&output);
let extra_args = match args {
ArgExprs(args) if abi != Abi::RustCall => {
}
}
-pub fn temporary_scope(tcx: &TyCtxt,
+pub fn temporary_scope(tcx: TyCtxt,
id: ast::NodeId)
-> ScopeId {
match tcx.region_maps.temporary_scope(id) {
}
}
-pub fn var_scope(tcx: &TyCtxt,
+pub fn var_scope(tcx: TyCtxt,
id: ast::NodeId)
-> ScopeId {
let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
use back::symbol_names;
use llvm::{ValueRef, get_param, get_params};
use rustc::hir::def_id::DefId;
-use rustc::infer;
-use rustc::traits::ProjectionMode;
use abi::{Abi, FnType};
use adt;
use attributes;
}
}
-fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
- closure_id: DefId,
- fn_ty: Ty<'tcx>)
- -> Ty<'tcx> {
+fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ closure_id: DefId,
+ fn_ty: Ty<'tcx>)
+ -> Ty<'tcx> {
match tcx.closure_kind(closure_id) {
ty::ClosureKind::Fn => {
tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), fn_ty)
/// necessary. If the ID does not correspond to a closure ID, returns None.
fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_id: DefId,
- substs: &ty::ClosureSubsts<'tcx>)
+ substs: ty::ClosureSubsts<'tcx>)
-> ValueRef {
// Normalize type so differences in regions and typedefs don't cause
// duplicate declarations
let tcx = ccx.tcx();
- let substs = tcx.erase_regions(substs);
- let instance = Instance::new(closure_id, &substs.func_substs);
+ let substs = tcx.erase_regions(&substs);
+ let instance = Instance::new(closure_id, substs.func_substs);
if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}",
let symbol = symbol_names::exported_name(ccx, &instance);
// Compute the rust-call form of the closure call method.
- let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
- let sig = &infcx.closure_type(closure_id, &substs).sig;
+ let sig = &tcx.closure_type(closure_id, substs).sig;
let sig = tcx.erase_late_bound_regions(sig);
- let sig = infer::normalize_associated_type(tcx, &sig);
- let closure_type = tcx.mk_closure_from_closure_substs(closure_id, Box::new(substs));
- let function_type = tcx.mk_fn_ptr(ty::BareFnTy {
+ let sig = tcx.normalize_associated_type(&sig);
+ let closure_type = tcx.mk_closure_from_closure_substs(closure_id, substs);
+ let function_type = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::RustCall,
sig: ty::Binder(ty::FnSig {
output: sig.output,
variadic: false
})
- });
+ }));
let llfn = declare::define_internal_fn(ccx, &symbol, function_type);
// set an inline hint for all closures
body: &hir::Block,
id: ast::NodeId,
closure_def_id: DefId, // (*)
- closure_substs: &ty::ClosureSubsts<'tcx>)
+ closure_substs: ty::ClosureSubsts<'tcx>)
-> Option<Block<'a, 'tcx>>
{
// (*) Note that in the case of inlined functions, the `closure_def_id` will be the
// this function (`trans_closure`) is invoked at the point
// of the closure expression.
- let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
- let function_type = infcx.closure_type(closure_def_id, closure_substs);
-
- let sig = tcx.erase_late_bound_regions(&function_type.sig);
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = &tcx.closure_type(closure_def_id, closure_substs).sig;
+ let sig = tcx.erase_late_bound_regions(sig);
+ let sig = tcx.normalize_associated_type(&sig);
let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id,
- Box::new(closure_substs.clone()));
+ closure_substs);
let sig = ty::FnSig {
inputs: Some(get_self_type(tcx, closure_def_id, closure_type))
.into_iter().chain(sig.inputs).collect(),
-> ValueRef
{
// If this is a closure, redirect to it.
- let llfn = get_or_create_closure_declaration(ccx, closure_def_id, &substs);
+ let llfn = get_or_create_closure_declaration(ccx, closure_def_id, substs);
// If the closure is a Fn closure, but a FnOnce is needed (etc),
// then adapt the self type
closure_def_id, substs, Value(llreffn));
let tcx = ccx.tcx();
- let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
// Find a version of the closure type. Substitute static for the
// region since it doesn't really matter.
- let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, Box::new(substs.clone()));
+ let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, substs);
let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), closure_ty);
// Make a version with the type of by-ref closure.
- let ty::ClosureTy { unsafety, abi, mut sig } = infcx.closure_type(closure_def_id, &substs);
+ let ty::ClosureTy { unsafety, abi, mut sig } =
+ tcx.closure_type(closure_def_id, substs);
sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
- let llref_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+ let llref_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
sig: sig.clone()
- });
+ }));
debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
llref_fn_ty);
sig.0.inputs[0] = closure_ty;
let sig = tcx.erase_late_bound_regions(&sig);
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = tcx.normalize_associated_type(&sig);
let fn_ty = FnType::new(ccx, abi, &sig, &[]);
- let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+ let llonce_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
sig: ty::Binder(sig)
- });
+ }));
// Create the by-value helper.
let function_name =
use syntax::parse::token;
use base::{custom_coerce_unsize_info, llvm_linkage_by_name};
-use context::CrateContext;
-use common::{fulfill_obligation, normalize_and_test_predicates,
- type_is_sized};
+use context::SharedCrateContext;
+use common::{fulfill_obligation, normalize_and_test_predicates, type_is_sized};
use glue::{self, DropGlueKind};
use llvm;
use meth;
}
}
-pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn collect_crate_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
mode: TransItemCollectionMode)
-> (FnvHashSet<TransItem<'tcx>>,
ReferenceMap<'tcx>) {
// We are not tracking dependencies of this pass as it has to be re-executed
// every time no matter what.
- ccx.tcx().dep_graph.with_ignore(|| {
- let roots = collect_roots(ccx, mode);
+ scx.tcx().dep_graph.with_ignore(|| {
+ let roots = collect_roots(scx, mode);
debug!("Building translation item graph, beginning at roots");
let mut visited = FnvHashSet();
let mut reference_map = ReferenceMap::new();
for root in roots {
- collect_items_rec(ccx,
+ collect_items_rec(scx,
root,
&mut visited,
&mut recursion_depths,
// Find all non-generic items by walking the HIR. These items serve as roots to
// start monomorphizing from.
-fn collect_roots<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn collect_roots<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
mode: TransItemCollectionMode)
-> Vec<TransItem<'tcx>> {
debug!("Collecting roots");
{
let mut visitor = RootCollector {
- ccx: ccx,
+ scx: scx,
mode: mode,
output: &mut roots,
enclosing_item: None,
};
- ccx.tcx().map.krate().visit_all_items(&mut visitor);
+ scx.tcx().map.krate().visit_all_items(&mut visitor);
}
roots
}
// Collect all monomorphized translation items reachable from `starting_point`
-fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
+fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
starting_point: TransItem<'tcx>,
visited: &mut FnvHashSet<TransItem<'tcx>>,
recursion_depths: &mut DefIdMap<usize>,
// We've been here already, no need to search again.
return;
}
- debug!("BEGIN collect_items_rec({})", starting_point.to_string(ccx));
+ debug!("BEGIN collect_items_rec({})", starting_point.to_string(scx.tcx()));
let mut neighbors = Vec::new();
let recursion_depth_reset;
match starting_point {
TransItem::DropGlue(t) => {
- find_drop_glue_neighbors(ccx, t, &mut neighbors);
+ find_drop_glue_neighbors(scx, t, &mut neighbors);
recursion_depth_reset = None;
}
TransItem::Static(node_id) => {
- let def_id = ccx.tcx().map.local_def_id(node_id);
- let ty = ccx.tcx().lookup_item_type(def_id).ty;
- let ty = glue::get_drop_glue_type(ccx, ty);
+ let def_id = scx.tcx().map.local_def_id(node_id);
+ let ty = scx.tcx().lookup_item_type(def_id).ty;
+ let ty = glue::get_drop_glue_type(scx.tcx(), ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
recursion_depth_reset = None;
}
TransItem::Fn(instance) => {
// Keep track of the monomorphization recursion depth
- recursion_depth_reset = Some(check_recursion_limit(ccx,
+ recursion_depth_reset = Some(check_recursion_limit(scx.tcx(),
instance,
recursion_depths));
// Scan the MIR in order to find function calls, closures, and
// drop-glue
- let mir = errors::expect(ccx.sess().diagnostic(), ccx.get_mir(instance.def),
+ let mir = errors::expect(scx.sess().diagnostic(), scx.get_mir(instance.def),
|| format!("Could not find MIR for function: {}", instance));
let mut visitor = MirNeighborCollector {
- ccx: ccx,
+ scx: scx,
mir: &mir,
output: &mut neighbors,
param_substs: instance.substs
}
}
- record_references(ccx, starting_point, &neighbors[..], reference_map);
+ record_references(scx.tcx(), starting_point, &neighbors[..], reference_map);
for neighbour in neighbors {
- collect_items_rec(ccx, neighbour, visited, recursion_depths, reference_map);
+ collect_items_rec(scx, neighbour, visited, recursion_depths, reference_map);
}
if let Some((def_id, depth)) = recursion_depth_reset {
recursion_depths.insert(def_id, depth);
}
- debug!("END collect_items_rec({})", starting_point.to_string(ccx));
+ debug!("END collect_items_rec({})", starting_point.to_string(scx.tcx()));
}
-fn record_references<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn record_references<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
caller: TransItem<'tcx>,
callees: &[TransItem<'tcx>],
reference_map: &mut ReferenceMap<'tcx>) {
let iter = callees.into_iter()
.map(|callee| {
let is_inlining_candidate = callee.is_from_extern_crate() ||
- callee.requests_inline(ccx.tcx());
+ callee.requests_inline(tcx);
(*callee, is_inlining_candidate)
});
reference_map.record_references(caller, iter);
}
-fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
- instance: Instance<'tcx>,
- recursion_depths: &mut DefIdMap<usize>)
- -> (DefId, usize) {
+fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ instance: Instance<'tcx>,
+ recursion_depths: &mut DefIdMap<usize>)
+ -> (DefId, usize) {
let recursion_depth = recursion_depths.get(&instance.def)
.map(|x| *x)
.unwrap_or(0);
// Code that needs to instantiate the same function recursively
// more than the recursion limit is assumed to be causing an
// infinite expansion.
- if recursion_depth > ccx.sess().recursion_limit.get() {
+ if recursion_depth > tcx.sess.recursion_limit.get() {
let error = format!("reached the recursion limit while instantiating `{}`",
instance);
- if let Some(node_id) = ccx.tcx().map.as_local_node_id(instance.def) {
- ccx.sess().span_fatal(ccx.tcx().map.span(node_id), &error);
+ if let Some(node_id) = tcx.map.as_local_node_id(instance.def) {
+ tcx.sess.span_fatal(tcx.map.span(node_id), &error);
} else {
- ccx.sess().fatal(&error);
+ tcx.sess.fatal(&error);
}
}
}
struct MirNeighborCollector<'a, 'tcx: 'a> {
- ccx: &'a CrateContext<'a, 'tcx>,
+ scx: &'a SharedCrateContext<'a, 'tcx>,
mir: &'a mir::Mir<'tcx>,
output: &'a mut Vec<TransItem<'tcx>>,
param_substs: &'tcx Substs<'tcx>
match *rvalue {
mir::Rvalue::Aggregate(mir::AggregateKind::Closure(def_id,
ref substs), _) => {
- assert!(can_have_local_instance(self.ccx, def_id));
- let trans_item = create_fn_trans_item(self.ccx,
+ assert!(can_have_local_instance(self.scx.tcx(), def_id));
+ let trans_item = create_fn_trans_item(self.scx.tcx(),
def_id,
substs.func_substs,
self.param_substs);
// have to instantiate all methods of the trait being cast to, so we
// can build the appropriate vtable.
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => {
- let target_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+ let target_ty = monomorphize::apply_param_substs(self.scx.tcx(),
self.param_substs,
&target_ty);
- let source_ty = self.mir.operand_ty(self.ccx.tcx(), operand);
- let source_ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+ let source_ty = self.mir.operand_ty(self.scx.tcx(), operand);
+ let source_ty = monomorphize::apply_param_substs(self.scx.tcx(),
self.param_substs,
&source_ty);
- let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.ccx,
+ let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx,
source_ty,
target_ty);
// This could also be a different Unsize instruction, like
// from a fixed sized array to a slice. But we are only
// interested in things that produce a vtable.
if target_ty.is_trait() && !source_ty.is_trait() {
- create_trans_items_for_vtable_methods(self.ccx,
+ create_trans_items_for_vtable_methods(self.scx,
target_ty,
source_ty,
self.output);
}
mir::Rvalue::Box(_) => {
let exchange_malloc_fn_def_id =
- self.ccx
+ self.scx
.tcx()
.lang_items
.require(ExchangeMallocFnLangItem)
- .unwrap_or_else(|e| self.ccx.sess().fatal(&e));
+ .unwrap_or_else(|e| self.scx.sess().fatal(&e));
- assert!(can_have_local_instance(self.ccx, exchange_malloc_fn_def_id));
+ assert!(can_have_local_instance(self.scx.tcx(), exchange_malloc_fn_def_id));
let exchange_malloc_fn_trans_item =
- create_fn_trans_item(self.ccx,
+ create_fn_trans_item(self.scx.tcx(),
exchange_malloc_fn_def_id,
- &Substs::empty(),
+ self.scx.tcx().mk_substs(Substs::empty()),
self.param_substs);
self.output.push(exchange_malloc_fn_trans_item);
debug!("visiting lvalue {:?}", *lvalue);
if let mir_visit::LvalueContext::Drop = context {
- let ty = self.mir.lvalue_ty(self.ccx.tcx(), lvalue)
- .to_ty(self.ccx.tcx());
+ let ty = self.mir.lvalue_ty(self.scx.tcx(), lvalue)
+ .to_ty(self.scx.tcx());
- let ty = monomorphize::apply_param_substs(self.ccx.tcx(),
+ let ty = monomorphize::apply_param_substs(self.scx.tcx(),
self.param_substs,
&ty);
- let ty = self.ccx.tcx().erase_regions(&ty);
- let ty = glue::get_drop_glue_type(self.ccx, ty);
+ let ty = self.scx.tcx().erase_regions(&ty);
+ let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
//
// Calling do_static_dispatch() here will map the def_id of
// `std::cmp::partial_cmp` to the def_id of `i32::partial_cmp<i32>`
- let dispatched = do_static_dispatch(self.ccx,
+ let dispatched = do_static_dispatch(self.scx,
callee_def_id,
callee_substs,
self.param_substs);
// object shim or a closure that is handled differently),
// we check if the callee is something that will actually
// result in a translation item ...
- if can_result_in_trans_item(self.ccx, callee_def_id) {
+ if can_result_in_trans_item(self.scx.tcx(), callee_def_id) {
// ... and create one if it does.
- let trans_item = create_fn_trans_item(self.ccx,
+ let trans_item = create_fn_trans_item(self.scx.tcx(),
callee_def_id,
callee_substs,
self.param_substs);
self.super_operand(operand);
- fn can_result_in_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ fn can_result_in_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
- if !match ccx.tcx().lookup_item_type(def_id).ty.sty {
+ if !match tcx.lookup_item_type(def_id).ty.sty {
ty::TyFnDef(def_id, _, _) => {
// Some constructors also have type TyFnDef but they are
// always instantiated inline and don't result in
// translation item. Same for FFI functions.
- match ccx.tcx().map.get_if_local(def_id) {
+ match tcx.map.get_if_local(def_id) {
Some(hir_map::NodeVariant(_)) |
Some(hir_map::NodeStructCtor(_)) |
Some(hir_map::NodeForeignItem(_)) => false,
Some(_) => true,
None => {
- ccx.sess().cstore.variant_kind(def_id).is_none()
+ tcx.sess.cstore.variant_kind(def_id).is_none()
}
}
}
return false;
}
- can_have_local_instance(ccx, def_id)
+ can_have_local_instance(tcx, def_id)
}
}
}
-fn can_have_local_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn can_have_local_instance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
// Take a look if we have the definition available. If not, we
// will not emit code for this item in the local crate, and thus
// don't create a translation item for it.
- def_id.is_local() || ccx.sess().cstore.is_item_mir_available(def_id)
+ def_id.is_local() || tcx.sess.cstore.is_item_mir_available(def_id)
}
-fn find_drop_glue_neighbors<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
dg: DropGlueKind<'tcx>,
output: &mut Vec<TransItem<'tcx>>) {
let ty = match dg {
}
};
- debug!("find_drop_glue_neighbors: {}", type_to_string(ccx, ty));
+ debug!("find_drop_glue_neighbors: {}", type_to_string(scx.tcx(), ty));
// Make sure the exchange_free_fn() lang-item gets translated if
// there is a boxed value.
if let ty::TyBox(_) = ty.sty {
- let exchange_free_fn_def_id = ccx.tcx()
+ let exchange_free_fn_def_id = scx.tcx()
.lang_items
.require(ExchangeFreeFnLangItem)
- .unwrap_or_else(|e| ccx.sess().fatal(&e));
+ .unwrap_or_else(|e| scx.sess().fatal(&e));
- assert!(can_have_local_instance(ccx, exchange_free_fn_def_id));
+ assert!(can_have_local_instance(scx.tcx(), exchange_free_fn_def_id));
let exchange_free_fn_trans_item =
- create_fn_trans_item(ccx,
+ create_fn_trans_item(scx.tcx(),
exchange_free_fn_def_id,
- &Substs::empty(),
- &Substs::empty());
+ scx.tcx().mk_substs(Substs::empty()),
+ scx.tcx().mk_substs(Substs::empty()));
output.push(exchange_free_fn_trans_item);
}
if let Some(destructor_did) = destructor_did {
use rustc::ty::ToPolyTraitRef;
- let drop_trait_def_id = ccx.tcx()
+ let drop_trait_def_id = scx.tcx()
.lang_items
.drop_trait()
.unwrap();
- let self_type_substs = ccx.tcx().mk_substs(
+ let self_type_substs = scx.tcx().mk_substs(
Substs::empty().with_self_ty(ty));
let trait_ref = ty::TraitRef {
substs: self_type_substs,
}.to_poly_trait_ref();
- let substs = match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+ let substs = match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data.substs,
_ => bug!()
};
- if can_have_local_instance(ccx, destructor_did) {
- let trans_item = create_fn_trans_item(ccx,
+ if can_have_local_instance(scx.tcx(), destructor_did) {
+ let trans_item = create_fn_trans_item(scx.tcx(),
destructor_did,
substs,
- &Substs::empty());
+ scx.tcx().mk_substs(Substs::empty()));
output.push(trans_item);
}
ty::TyStruct(ref adt_def, substs) |
ty::TyEnum(ref adt_def, substs) => {
for field in adt_def.all_fields() {
- let field_type = monomorphize::apply_param_substs(ccx.tcx(),
+ let field_type = monomorphize::apply_param_substs(scx.tcx(),
substs,
&field.unsubst_ty());
- let field_type = glue::get_drop_glue_type(ccx, field_type);
+ let field_type = glue::get_drop_glue_type(scx.tcx(), field_type);
- if glue::type_needs_drop(ccx.tcx(), field_type) {
+ if glue::type_needs_drop(scx.tcx(), field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
- ty::TyClosure(_, ref substs) => {
- for upvar_ty in &substs.upvar_tys {
- let upvar_ty = glue::get_drop_glue_type(ccx, upvar_ty);
- if glue::type_needs_drop(ccx.tcx(), upvar_ty) {
+ ty::TyClosure(_, substs) => {
+ for upvar_ty in substs.upvar_tys {
+ let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty);
+ if glue::type_needs_drop(scx.tcx(), upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
}
ty::TyBox(inner_type) |
ty::TyArray(inner_type, _) => {
- let inner_type = glue::get_drop_glue_type(ccx, inner_type);
- if glue::type_needs_drop(ccx.tcx(), inner_type) {
+ let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type);
+ if glue::type_needs_drop(scx.tcx(), inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
- ty::TyTuple(ref args) => {
+ ty::TyTuple(args) => {
for arg in args {
- let arg = glue::get_drop_glue_type(ccx, arg);
- if glue::type_needs_drop(ccx.tcx(), arg) {
+ let arg = glue::get_drop_glue_type(scx.tcx(), arg);
+ if glue::type_needs_drop(scx.tcx(), arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
}
}
-fn do_static_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn do_static_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
fn_def_id: DefId,
fn_substs: &'tcx Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>)
-> Option<(DefId, &'tcx Substs<'tcx>)> {
debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})",
- def_id_to_string(ccx, fn_def_id),
+ def_id_to_string(scx.tcx(), fn_def_id),
fn_substs,
param_substs);
- let is_trait_method = ccx.tcx().trait_of_item(fn_def_id).is_some();
+ let is_trait_method = scx.tcx().trait_of_item(fn_def_id).is_some();
if is_trait_method {
- match ccx.tcx().impl_or_trait_item(fn_def_id) {
+ match scx.tcx().impl_or_trait_item(fn_def_id) {
ty::MethodTraitItem(ref method) => {
match method.container {
ty::TraitContainer(trait_def_id) => {
debug!(" => trait method, attempting to find impl");
- do_static_trait_method_dispatch(ccx,
+ do_static_trait_method_dispatch(scx,
method,
trait_def_id,
fn_substs,
// Given a trait-method and substitution information, find out the actual
// implementation of the trait method.
-fn do_static_trait_method_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn do_static_trait_method_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
trait_method: &ty::Method,
trait_id: DefId,
callee_substs: &'tcx Substs<'tcx>,
param_substs: &'tcx Substs<'tcx>)
-> Option<(DefId, &'tcx Substs<'tcx>)> {
- let tcx = ccx.tcx();
+ let tcx = scx.tcx();
debug!("do_static_trait_method_dispatch(trait_method={}, \
trait_id={}, \
callee_substs={:?}, \
param_substs={:?}",
- def_id_to_string(ccx, trait_method.def_id),
- def_id_to_string(ccx, trait_id),
+ def_id_to_string(scx.tcx(), trait_method.def_id),
+ def_id_to_string(scx.tcx(), trait_id),
callee_substs,
param_substs);
let rcvr_substs = monomorphize::apply_param_substs(tcx,
param_substs,
- callee_substs);
+ &callee_substs);
let trait_ref = ty::Binder(rcvr_substs.to_trait_ref(tcx, trait_id));
- let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
+ let vtbl = fulfill_obligation(scx, DUMMY_SP, trait_ref);
// Now that we know which impl is being used, we can dispatch to
// the actual function:
///
/// Finally, there is also the case of custom unsizing coercions, e.g. for
/// smart pointers such as `Rc` and `Arc`.
-fn find_vtable_types_for_unsizing<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
source_ty: ty::Ty<'tcx>,
target_ty: ty::Ty<'tcx>)
-> (ty::Ty<'tcx>, ty::Ty<'tcx>) {
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);
- if !type_is_sized(ccx.tcx(), inner_source) {
+ if !type_is_sized(scx.tcx(), inner_source) {
(inner_source, inner_target)
} else {
- ccx.tcx().struct_lockstep_tails(inner_source, inner_target)
+ scx.tcx().struct_lockstep_tails(inner_source, inner_target)
}
}
&ty::TyStruct(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
- let kind = custom_coerce_unsize_info(ccx, source_ty, target_ty);
+ let kind = custom_coerce_unsize_info(scx, source_ty, target_ty);
let coerce_index = match kind {
CustomCoerceUnsized::Struct(i) => i
assert!(coerce_index < source_fields.len() &&
source_fields.len() == target_fields.len());
- find_vtable_types_for_unsizing(ccx,
- source_fields[coerce_index].ty(ccx.tcx(),
+ find_vtable_types_for_unsizing(scx,
+ source_fields[coerce_index].ty(scx.tcx(),
source_substs),
- target_fields[coerce_index].ty(ccx.tcx(),
+ target_fields[coerce_index].ty(scx.tcx(),
target_substs))
}
_ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
}
}
-fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn create_fn_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- fn_substs: &Substs<'tcx>,
- param_substs: &Substs<'tcx>)
- -> TransItem<'tcx>
-{
+ fn_substs: &'tcx Substs<'tcx>,
+ param_substs: &'tcx Substs<'tcx>)
+ -> TransItem<'tcx> {
debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})",
- def_id_to_string(ccx, def_id),
+ def_id_to_string(tcx, def_id),
fn_substs,
param_substs);
// We only get here, if fn_def_id either designates a local item or
// an inlineable external item. Non-inlineable external items are
// ignored because we don't want to generate any code for them.
- let concrete_substs = monomorphize::apply_param_substs(ccx.tcx(),
+ let concrete_substs = monomorphize::apply_param_substs(tcx,
param_substs,
- fn_substs);
- let concrete_substs = ccx.tcx().erase_regions(&concrete_substs);
+ &fn_substs);
+ let concrete_substs = tcx.erase_regions(&concrete_substs);
let trans_item =
- TransItem::Fn(Instance::new(def_id,
- &ccx.tcx().mk_substs(concrete_substs)));
-
+ TransItem::Fn(Instance::new(def_id, concrete_substs));
return trans_item;
}
/// Creates a `TransItem` for each method that is referenced by the vtable for
/// the given trait/impl pair.
-fn create_trans_items_for_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
trait_ty: ty::Ty<'tcx>,
impl_ty: ty::Ty<'tcx>,
output: &mut Vec<TransItem<'tcx>>) {
assert!(!trait_ty.needs_subst() && !impl_ty.needs_subst());
if let ty::TyTrait(ref trait_ty) = trait_ty.sty {
- let poly_trait_ref = trait_ty.principal_trait_ref_with_self_ty(ccx.tcx(),
+ let poly_trait_ref = trait_ty.principal_trait_ref_with_self_ty(scx.tcx(),
impl_ty);
// Walk all methods of the trait, including those of its supertraits
- for trait_ref in traits::supertraits(ccx.tcx(), poly_trait_ref) {
- let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref);
+ for trait_ref in traits::supertraits(scx.tcx(), poly_trait_ref) {
+ let vtable = fulfill_obligation(scx, DUMMY_SP, trait_ref);
match vtable {
traits::VtableImpl(
traits::VtableImplData {
impl_def_id,
substs,
nested: _ }) => {
- let items = meth::get_vtable_methods(ccx, impl_def_id, substs)
+ let items = meth::get_vtable_methods(scx.tcx(), impl_def_id, substs)
.into_iter()
// filter out None values
.filter_map(|opt_impl_method| opt_impl_method)
// create translation items
.filter_map(|impl_method| {
- if can_have_local_instance(ccx, impl_method.method.def_id) {
- Some(create_fn_trans_item(ccx,
- impl_method.method.def_id,
- &impl_method.substs,
- &Substs::empty()))
+ if can_have_local_instance(scx.tcx(), impl_method.method.def_id) {
+ Some(create_fn_trans_item(scx.tcx(),
+ impl_method.method.def_id,
+ impl_method.substs,
+ scx.tcx().mk_substs(Substs::empty())))
} else {
None
}
//=-----------------------------------------------------------------------------
struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> {
- ccx: &'b CrateContext<'a, 'tcx>,
+ scx: &'b SharedCrateContext<'a, 'tcx>,
mode: TransItemCollectionMode,
output: &'b mut Vec<TransItem<'tcx>>,
enclosing_item: Option<&'tcx hir::Item>,
hir::ItemImpl(..) => {
if self.mode == TransItemCollectionMode::Eager {
- create_trans_items_for_default_impls(self.ccx,
+ create_trans_items_for_default_impls(self.scx.tcx(),
item,
self.output);
}
hir::ItemStruct(_, ref generics) => {
if !generics.is_parameterized() {
let ty = {
- let tables = self.ccx.tcx().tables.borrow();
+ let tables = self.scx.tcx().tables.borrow();
tables.node_types[&item.id]
};
if self.mode == TransItemCollectionMode::Eager {
debug!("RootCollector: ADT drop-glue for {}",
- def_id_to_string(self.ccx,
- self.ccx.tcx().map.local_def_id(item.id)));
+ def_id_to_string(self.scx.tcx(),
+ self.scx.tcx().map.local_def_id(item.id)));
- let ty = glue::get_drop_glue_type(self.ccx, ty);
+ let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
}
}
hir::ItemStatic(..) => {
debug!("RootCollector: ItemStatic({})",
- def_id_to_string(self.ccx,
- self.ccx.tcx().map.local_def_id(item.id)));
+ def_id_to_string(self.scx.tcx(),
+ self.scx.tcx().map.local_def_id(item.id)));
self.output.push(TransItem::Static(item.id));
}
hir::ItemFn(_, _, constness, _, ref generics, _) => {
if !generics.is_type_parameterized() &&
constness == hir::Constness::NotConst {
- let def_id = self.ccx.tcx().map.local_def_id(item.id);
+ let def_id = self.scx.tcx().map.local_def_id(item.id);
debug!("RootCollector: ItemFn({})",
- def_id_to_string(self.ccx, def_id));
+ def_id_to_string(self.scx.tcx(), def_id));
- let instance = Instance::mono(self.ccx.tcx(), def_id);
+ let instance = Instance::mono(self.scx.tcx(), def_id);
self.output.push(TransItem::Fn(instance));
}
}
constness,
..
}, _) if constness == hir::Constness::NotConst => {
- let hir_map = &self.ccx.tcx().map;
+ let hir_map = &self.scx.tcx().map;
let parent_node_id = hir_map.get_parent_node(ii.id);
let is_impl_generic = match hir_map.expect_item(parent_node_id) {
&hir::Item {
};
if !generics.is_type_parameterized() && !is_impl_generic {
- let def_id = self.ccx.tcx().map.local_def_id(ii.id);
+ let def_id = self.scx.tcx().map.local_def_id(ii.id);
debug!("RootCollector: MethodImplItem({})",
- def_id_to_string(self.ccx, def_id));
+ def_id_to_string(self.scx.tcx(), def_id));
- let instance = Instance::mono(self.ccx.tcx(), def_id);
+ let instance = Instance::mono(self.scx.tcx(), def_id);
self.output.push(TransItem::Fn(instance));
}
}
}
}
-fn create_trans_items_for_default_impls<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn create_trans_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
item: &'tcx hir::Item,
output: &mut Vec<TransItem<'tcx>>) {
match item.node {
return
}
- let tcx = ccx.tcx();
let impl_def_id = tcx.map.local_def_id(item.id);
debug!("create_trans_items_for_default_impls(item={})",
- def_id_to_string(ccx, impl_def_id));
+ def_id_to_string(tcx, impl_def_id));
if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
let default_impls = tcx.provided_trait_methods(trait_ref.def_id);
- let callee_substs = tcx.mk_substs(tcx.erase_regions(trait_ref.substs));
+ let callee_substs = tcx.erase_regions(&trait_ref.substs);
let overridden_methods: FnvHashSet<_> = items.iter()
.map(|item| item.name)
.collect();
assert!(mth.is_provided);
let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
- if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
+ if !normalize_and_test_predicates(tcx, predicates.into_vec()) {
continue;
}
- if can_have_local_instance(ccx, default_impl.def_id) {
- let empty_substs = ccx.tcx().mk_substs(ccx.tcx().erase_regions(mth.substs));
- let item = create_fn_trans_item(ccx,
+ if can_have_local_instance(tcx, default_impl.def_id) {
+ let empty_substs = tcx.erase_regions(&mth.substs);
+ let item = create_fn_trans_item(tcx,
default_impl.def_id,
callee_substs,
empty_substs);
/// Same as `unique_type_name()` but with the result pushed onto the given
/// `output` parameter.
-pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: ty::Ty<'tcx>,
output: &mut String) {
match t.sty {
ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"),
ty::TyStruct(adt_def, substs) |
ty::TyEnum(adt_def, substs) => {
- push_item_name(cx, adt_def.did, output);
- push_type_params(cx, &substs.types, &[], output);
+ push_item_name(tcx, adt_def.did, output);
+ push_type_params(tcx, &substs.types, &[], output);
},
- ty::TyTuple(ref component_types) => {
+ ty::TyTuple(component_types) => {
output.push('(');
for &component_type in component_types {
- push_unique_type_name(cx, component_type, output);
+ push_unique_type_name(tcx, component_type, output);
output.push_str(", ");
}
if !component_types.is_empty() {
},
ty::TyBox(inner_type) => {
output.push_str("Box<");
- push_unique_type_name(cx, inner_type, output);
+ push_unique_type_name(tcx, inner_type, output);
output.push('>');
},
ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
hir::MutMutable => output.push_str("mut "),
}
- push_unique_type_name(cx, inner_type, output);
+ push_unique_type_name(tcx, inner_type, output);
},
ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => {
output.push('&');
output.push_str("mut ");
}
- push_unique_type_name(cx, inner_type, output);
+ push_unique_type_name(tcx, inner_type, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
- push_unique_type_name(cx, inner_type, output);
+ push_unique_type_name(tcx, inner_type, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
- push_unique_type_name(cx, inner_type, output);
+ push_unique_type_name(tcx, inner_type, output);
output.push(']');
},
ty::TyTrait(ref trait_data) => {
- push_item_name(cx, trait_data.principal.skip_binder().def_id, output);
- push_type_params(cx,
+ push_item_name(tcx, trait_data.principal.skip_binder().def_id, output);
+ push_type_params(tcx,
&trait_data.principal.skip_binder().substs.types,
&trait_data.bounds.projection_bounds,
output);
output.push_str("fn(");
- let sig = cx.tcx().erase_late_bound_regions(sig);
+ let sig = tcx.erase_late_bound_regions(sig);
if !sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
- push_unique_type_name(cx, parameter_type, output);
+ push_unique_type_name(tcx, parameter_type, output);
output.push_str(", ");
}
output.pop();
ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
- push_unique_type_name(cx, result_type, output);
+ push_unique_type_name(tcx, result_type, output);
}
ty::FnDiverging => {
output.push_str(" -> !");
}
},
ty::TyClosure(def_id, ref closure_substs) => {
- push_item_name(cx, def_id, output);
+ push_item_name(tcx, def_id, output);
output.push_str("{");
output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize()));
output.push_str("}");
- push_type_params(cx, &closure_substs.func_substs.types, &[], output);
+ push_type_params(tcx, &closure_substs.func_substs.types, &[], output);
}
ty::TyError |
ty::TyInfer(_) |
}
}
-fn push_item_name(ccx: &CrateContext,
- def_id: DefId,
- output: &mut String) {
- let def_path = ccx.tcx().def_path(def_id);
+fn push_item_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ output: &mut String) {
+ let def_path = tcx.def_path(def_id);
// some_crate::
- output.push_str(&ccx.tcx().crate_name(def_path.krate));
+ output.push_str(&tcx.crate_name(def_path.krate));
output.push_str("::");
// foo::bar::ItemName::
- for part in ccx.tcx().def_path(def_id).data {
+ for part in tcx.def_path(def_id).data {
output.push_str(&format!("{}[{}]::",
part.data.as_interned_str(),
part.disambiguator));
output.pop();
}
-fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn push_type_params<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
types: &'tcx subst::VecPerParamSpace<Ty<'tcx>>,
projections: &[ty::PolyProjectionPredicate<'tcx>],
output: &mut String) {
output.push('<');
for &type_parameter in types {
- push_unique_type_name(cx, type_parameter, output);
+ push_unique_type_name(tcx, type_parameter, output);
output.push_str(", ");
}
let name = token::get_ident_interner().get(projection.projection_ty.item_name);
output.push_str(&name[..]);
output.push_str("=");
- push_unique_type_name(cx, projection.ty, output);
+ push_unique_type_name(tcx, projection.ty, output);
output.push_str(", ");
}
output.push('>');
}
-fn push_instance_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn push_instance_as_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: Instance<'tcx>,
output: &mut String) {
- push_item_name(ccx, instance.def, output);
- push_type_params(ccx, &instance.substs.types, &[], output);
+ push_item_name(tcx, instance.def, output);
+ push_type_params(tcx, &instance.substs.types, &[], output);
}
-pub fn def_id_to_string(ccx: &CrateContext, def_id: DefId) -> String {
+pub fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId) -> String {
let mut output = String::new();
- push_item_name(ccx, def_id, &mut output);
+ push_item_name(tcx, def_id, &mut output);
output
}
-fn type_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: ty::Ty<'tcx>)
-> String {
let mut output = String::new();
- push_unique_type_name(ccx, ty, &mut output);
+ push_unique_type_name(tcx, ty, &mut output);
output
}
-impl<'tcx> TransItem<'tcx> {
-
- pub fn requests_inline(&self, tcx: &TyCtxt<'tcx>) -> bool {
+impl<'a, 'tcx> TransItem<'tcx> {
+ pub fn requests_inline(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool {
match *self {
TransItem::Fn(ref instance) => {
let attributes = tcx.get_attrs(instance.def);
}
}
- pub fn explicit_linkage(&self, tcx: &TyCtxt<'tcx>) -> Option<llvm::Linkage> {
+ pub fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option<llvm::Linkage> {
let def_id = match *self {
TransItem::Fn(ref instance) => instance.def,
TransItem::Static(node_id) => tcx.map.local_def_id(node_id),
}
}
- pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
- let hir_map = &ccx.tcx().map;
+ pub fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String {
+ let hir_map = &tcx.map;
return match *self {
TransItem::DropGlue(dg) => {
DropGlueKind::Ty(_) => s.push_str("drop-glue "),
DropGlueKind::TyContents(_) => s.push_str("drop-glue-contents "),
};
- push_unique_type_name(ccx, dg.ty(), &mut s);
+ push_unique_type_name(tcx, dg.ty(), &mut s);
s
}
TransItem::Fn(instance) => {
- to_string_internal(ccx, "fn ", instance)
+ to_string_internal(tcx, "fn ", instance)
},
TransItem::Static(node_id) => {
let def_id = hir_map.local_def_id(node_id);
- let instance = Instance::mono(ccx.tcx(), def_id);
- to_string_internal(ccx, "static ", instance)
+ let instance = Instance::mono(tcx, def_id);
+ to_string_internal(tcx, "static ", instance)
},
};
- fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ fn to_string_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
prefix: &str,
instance: Instance<'tcx>)
-> String {
let mut result = String::with_capacity(32);
result.push_str(prefix);
- push_instance_as_string(ccx, instance, &mut result);
+ push_instance_as_string(tcx, instance, &mut result);
result
}
}
NotPredictedButGenerated,
}
-pub fn collecting_debug_information(ccx: &CrateContext) -> bool {
+pub fn collecting_debug_information(scx: &SharedCrateContext) -> bool {
return cfg!(debug_assertions) &&
- ccx.sess().opts.debugging_opts.print_trans_items.is_some();
+ scx.sess().opts.debugging_opts.print_trans_items.is_some();
}
-pub fn print_collection_results<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
+pub fn print_collection_results<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>) {
use std::hash::{Hash, SipHasher, Hasher};
- if !collecting_debug_information(ccx) {
+ if !collecting_debug_information(scx) {
return;
}
s.finish()
}
- let trans_items = ccx.translation_items().borrow();
+ let trans_items = scx.translation_items().borrow();
{
// Check for duplicate item keys
let mut item_keys = FnvHashMap();
for (item, item_state) in trans_items.iter() {
- let k = item.to_string(&ccx);
+ let k = item.to_string(scx.tcx());
if item_keys.contains_key(&k) {
let prev: (TransItem, TransItemState) = item_keys[&k];
let mut generated = FnvHashSet();
for (item, item_state) in trans_items.iter() {
- let item_key = item.to_string(&ccx);
+ let item_key = item.to_string(scx.tcx());
match *item_state {
TransItemState::PredictedAndGenerated => {
use rustc::cfg;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
-use rustc::infer;
+use rustc::infer::TransNormalize;
+use rustc::util::common::MemoizationMap;
use middle::lang_items::LangItem;
use rustc::ty::subst::Substs;
use abi::{Abi, FnType};
use syntax::parse::token::InternedString;
use syntax::parse::token;
-pub use context::CrateContext;
+pub use context::{CrateContext, SharedCrateContext};
/// Is the type's representation size known at compile time?
-pub fn type_is_sized<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
- ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP)
+pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP)
}
-pub fn type_is_fat_ptr<'tcx>(cx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => {
- !type_is_sized(cx, ty)
+ !type_is_sized(tcx, ty)
}
_ => {
false
pub fields: Vec<Field<'tcx>>
}
-impl<'tcx> VariantInfo<'tcx> {
- pub fn from_ty(tcx: &TyCtxt<'tcx>,
+impl<'a, 'tcx> VariantInfo<'tcx> {
+ pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
opt_def: Option<Def>)
-> Self
}
/// Return the variant corresponding to a given node (e.g. expr)
- pub fn of_node(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
+ pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
let node_def = tcx.def_map.borrow().get(&id).map(|v| v.full_def());
Self::from_ty(tcx, ty, node_def)
}
pub llfn: ValueRef,
// always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
- pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
+ pub param_env: ty::ParameterEnvironment<'tcx>,
// A pointer to where to store the return value. If the return type is
// immediate, this points to an alloca in the function. Otherwise, it's a
}
pub fn monomorphize<T>(&self, value: &T) -> T
- where T : TypeFoldable<'tcx>
+ where T: TransNormalize<'tcx>
{
monomorphize::apply_param_substs(self.ccx.tcx(),
self.param_substs,
// landing pads as "landing pads for SEH".
let ccx = self.ccx;
let tcx = ccx.tcx();
- let target = &ccx.sess().target.target;
match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())).reify(ccx).val
}
- _ => if let Some(llpersonality) = ccx.eh_personality().get() {
- llpersonality
- } else {
- let name = if !base::wants_msvc_seh(ccx.sess()) {
- "rust_eh_personality"
- } else if target.arch == "x86" {
- "_except_handler3"
+ _ => {
+ if let Some(llpersonality) = ccx.eh_personality().get() {
+ return llpersonality
+ }
+ let name = if base::wants_msvc_seh(ccx.sess()) {
+ "__CxxFrameHandler3"
} else {
- "__C_specific_handler"
+ "rust_eh_personality"
};
let fty = Type::variadic_func(&[], &Type::i32(ccx));
let f = declare::declare_cfn(ccx, name, fty);
return Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty()));
}
- let ty = tcx.mk_fn_ptr(ty::BareFnTy {
+ let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::C,
sig: ty::Binder(ty::FnSig {
output: ty::FnDiverging,
variadic: false
}),
- });
+ }));
let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
self.fcx
}
- pub fn tcx(&self) -> &'blk TyCtxt<'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
self.fcx.ccx.tcx()
}
pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
}
pub fn monomorphize<T>(&self, value: &T) -> T
- where T : TypeFoldable<'tcx>
+ where T: TransNormalize<'tcx>
{
monomorphize::apply_param_substs(self.tcx(),
self.fcx.param_substs,
pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
self.bcx.fcx()
}
- pub fn tcx(&self) -> &'blk TyCtxt<'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
self.bcx.tcx()
}
pub fn sess(&self) -> &'blk Session {
}
pub fn monomorphize<T>(&self, value: &T) -> T
- where T: TypeFoldable<'tcx>
+ where T: TransNormalize<'tcx>
{
self.bcx.monomorphize(value)
}
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
-pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
span: Span,
trait_ref: ty::PolyTraitRef<'tcx>)
-> traits::Vtable<'tcx, ()>
{
- let tcx = ccx.tcx();
+ let tcx = scx.tcx();
// Remove any references to regions; this helps improve caching.
let trait_ref = tcx.erase_regions(&trait_ref);
- // First check the cache.
- match ccx.trait_cache().borrow().get(&trait_ref) {
- Some(vtable) => {
- info!("Cache hit: {:?}", trait_ref);
- return (*vtable).clone();
- }
- None => { }
- }
-
- debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
- trait_ref, trait_ref.def_id());
-
-
- // Do the initial selection for the obligation. This yields the
- // shallow result we are looking for -- that is, what specific impl.
- let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
- let mut selcx = SelectionContext::new(&infcx);
-
- let obligation =
- traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID),
- trait_ref.to_poly_trait_predicate());
- let selection = match selcx.select(&obligation) {
- Ok(Some(selection)) => selection,
- Ok(None) => {
- // Ambiguity can happen when monomorphizing during trans
- // expands to some humongo type that never occurred
- // statically -- this humongo type can then overflow,
- // leading to an ambiguous result. So report this as an
- // overflow bug, since I believe this is the only case
- // where ambiguity can result.
- debug!("Encountered ambiguity selecting `{:?}` during trans, \
- presuming due to overflow",
- trait_ref);
- ccx.sess().span_fatal(
- span,
- "reached the recursion limit during monomorphization (selection ambiguity)");
- }
- Err(e) => {
- span_bug!(
- span,
- "Encountered error `{:?}` selecting `{:?}` during trans",
- e,
- trait_ref)
- }
- };
-
- // Currently, we use a fulfillment context to completely resolve
- // all nested obligations. This is because they can inform the
- // inference of the impl's type parameters.
- let mut fulfill_cx = traits::FulfillmentContext::new();
- let vtable = selection.map(|predicate| {
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
- });
- let vtable = infer::drain_fulfillment_cx_or_panic(
- span, &infcx, &mut fulfill_cx, &vtable
- );
-
- info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
-
- ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone());
-
- vtable
+ scx.trait_cache().memoize(trait_ref, || {
+ debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
+ trait_ref, trait_ref.def_id());
+
+ // Do the initial selection for the obligation. This yields the
+ // shallow result we are looking for -- that is, what specific impl.
+ tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+
+ let obligation_cause = traits::ObligationCause::misc(span,
+ ast::DUMMY_NODE_ID);
+ let obligation = traits::Obligation::new(obligation_cause,
+ trait_ref.to_poly_trait_predicate());
+
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => {
+ // Ambiguity can happen when monomorphizing during trans
+ // expands to some humongo type that never occurred
+ // statically -- this humongo type can then overflow,
+ // leading to an ambiguous result. So report this as an
+ // overflow bug, since I believe this is the only case
+ // where ambiguity can result.
+ debug!("Encountered ambiguity selecting `{:?}` during trans, \
+ presuming due to overflow",
+ trait_ref);
+ tcx.sess.span_fatal(span,
+ "reached the recursion limit during monomorphization \
+ (selection ambiguity)");
+ }
+ Err(e) => {
+ span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
+ e, trait_ref)
+ }
+ };
+
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters.
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+ let vtable = selection.map(|predicate| {
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ });
+ let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
+
+ info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+ vtable
+ })
+ })
}
/// Normalizes the predicates and checks whether they hold. If this
/// returns false, then either normalize encountered an error or one
/// of the predicates did not hold. Used when creating vtables to
/// check for unsatisfiable methods.
-pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
predicates: Vec<ty::Predicate<'tcx>>)
-> bool
{
debug!("normalize_and_test_predicates(predicates={:?})",
predicates);
- let tcx = ccx.tcx();
- let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
- let mut selcx = SelectionContext::new(&infcx);
- let mut fulfill_cx = traits::FulfillmentContext::new();
- let cause = traits::ObligationCause::dummy();
- let traits::Normalized { value: predicates, obligations } =
- traits::normalize(&mut selcx, cause.clone(), &predicates);
- for obligation in obligations {
- fulfill_cx.register_predicate_obligation(&infcx, obligation);
- }
- for predicate in predicates {
- let obligation = traits::Obligation::new(cause.clone(), predicate);
- fulfill_cx.register_predicate_obligation(&infcx, obligation);
- }
+ tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+ let cause = traits::ObligationCause::dummy();
+ let traits::Normalized { value: predicates, obligations } =
+ traits::normalize(&mut selcx, cause.clone(), &predicates);
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
+ for predicate in predicates {
+ let obligation = traits::Obligation::new(cause.clone(), predicate);
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
- infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
+ infcx.drain_fulfillment_cx(&mut fulfill_cx, &()).is_ok()
+ })
}
pub fn langcall(bcx: Block,
fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
- substs: Substs<'tcx>,
+ substs: &'tcx Substs<'tcx>,
arg_vals: &[ValueRef],
param_substs: &'tcx Substs<'tcx>,
trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
let arg_ids = args.iter().map(|arg| arg.pat.id);
let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
+ let substs = ccx.tcx().mk_substs(substs.clone().erase_regions());
let substs = monomorphize::apply_param_substs(ccx.tcx(),
param_substs,
- &substs.erase_regions());
- let substs = ccx.tcx().mk_substs(substs);
+ &substs);
const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
}
param_substs: &'tcx Substs<'tcx>)
-> &'tcx hir::Expr {
let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
+ let substs = ccx.tcx().mk_substs(substs.clone().erase_regions());
let substs = monomorphize::apply_param_substs(ccx.tcx(),
param_substs,
- &substs.erase_regions());
+ &substs);
match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
Some((ref expr, _ty)) => expr,
None => {
Ok(())
}
-pub fn to_const_int(value: ValueRef, t: Ty, tcx: &TyCtxt) -> Option<ConstInt> {
+pub fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
match t.sty {
ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
ast::IntTy::I8 => {
let arg_vals = map_list(args)?;
let method_call = ty::MethodCall::expr(e.id);
let method = cx.tcx().tables.borrow().method_map[&method_call];
- const_fn_call(cx, method.def_id, method.substs.clone(),
+ const_fn_call(cx, method.def_id, method.substs,
&arg_vals, param_substs, trueconst)?
},
hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0,
},
hir::ExprClosure(_, ref decl, ref body, _) => {
match ety.sty {
- ty::TyClosure(def_id, ref substs) => {
+ ty::TyClosure(def_id, substs) => {
closure::trans_closure_expr(closure::Dest::Ignore(cx),
decl,
body,
attrs: &[ast::Attribute])
-> Result<ValueRef, ConstEvalErr> {
- if collector::collecting_debug_information(ccx) {
+ if collector::collecting_debug_information(ccx.shared()) {
ccx.record_translation_item_as_generated(TransItem::Static(id));
}
use monomorphize::Instance;
use collector::{TransItem, TransItemState};
+use partitioning::CodegenUnit;
use type_::{Type, TypeNames};
use rustc::ty::subst::{Substs, VecPerParamSpace};
use rustc::ty::{self, Ty, TyCtxt};
/// crate, so it must not contain references to any LLVM data structures
/// (aside from metadata-related ones).
pub struct SharedCrateContext<'a, 'tcx: 'a> {
- local_ccxs: Vec<LocalCrateContext<'tcx>>,
-
metadata_llmod: ModuleRef,
metadata_llcx: ContextRef,
item_symbols: RefCell<NodeMap<String>>,
link_meta: LinkMeta,
symbol_hasher: RefCell<Sha256>,
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
stats: Stats,
check_overflow: bool,
check_drop_flag_for_sanity: bool,
use_dll_storage_attrs: bool,
translation_items: RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>>,
+ trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
}
/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
pub struct LocalCrateContext<'tcx> {
llmod: ModuleRef,
llcx: ContextRef,
- tn: TypeNames,
+ tn: TypeNames, // FIXME: This seems to be largely unused.
+ codegen_unit: CodegenUnit<'tcx>,
needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>,
/// Depth of the current type-of computation - used to bail out
type_of_depth: Cell<usize>,
-
- trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
}
// Implement DepTrackingMapConfig for `trait_cache`
type Key = ty::PolyTraitRef<'tcx>;
type Value = traits::Vtable<'tcx, ()>;
fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
- ty::tls::with(|tcx| {
- let lifted_key = tcx.lift(key).unwrap();
- lifted_key.to_poly_trait_predicate().dep_node()
- })
+ key.to_poly_trait_predicate().dep_node()
+ }
+}
+
+/// This list owns a number of LocalCrateContexts and binds them to their common
+/// SharedCrateContext. This type just exists as a convenience, something to
+/// pass around all LocalCrateContexts with and get an iterator over them.
+pub struct CrateContextList<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: Vec<LocalCrateContext<'tcx>>,
+}
+
+impl<'a, 'tcx: 'a> CrateContextList<'a, 'tcx> {
+
+ pub fn new(shared_ccx: &'a SharedCrateContext<'a, 'tcx>,
+ codegen_units: Vec<CodegenUnit<'tcx>>)
+ -> CrateContextList<'a, 'tcx> {
+ CrateContextList {
+ shared: shared_ccx,
+ local_ccxs: codegen_units.into_iter().map(|codegen_unit| {
+ LocalCrateContext::new(shared_ccx, codegen_unit)
+ }).collect()
+ }
+ }
+
+ pub fn iter<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
+ CrateContextIterator {
+ shared: self.shared,
+ index: 0,
+ local_ccxs: &self.local_ccxs[..]
+ }
+ }
+
+ pub fn get_ccx<'b>(&'b self, index: usize) -> CrateContext<'b, 'tcx> {
+ CrateContext {
+ shared: self.shared,
+ index: index,
+ local_ccxs: &self.local_ccxs[..],
+ }
+ }
+
+ pub fn shared(&self) -> &'a SharedCrateContext<'a, 'tcx> {
+ self.shared
}
}
+/// A CrateContext value binds together one LocalCrateContext with the
+/// SharedCrateContext. It exists as a convenience wrapper, so we don't have to
+/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans.
pub struct CrateContext<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
- local: &'a LocalCrateContext<'tcx>,
- /// The index of `local` in `shared.local_ccxs`. This is used in
+ local_ccxs: &'a [LocalCrateContext<'tcx>],
+ /// The index of `local` in `local_ccxs`. This is used in
/// `maybe_iter(true)` to identify the original `LocalCrateContext`.
index: usize,
}
pub struct CrateContextIterator<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: &'a [LocalCrateContext<'tcx>],
index: usize,
}
type Item = CrateContext<'a, 'tcx>;
fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
- if self.index >= self.shared.local_ccxs.len() {
+ if self.index >= self.local_ccxs.len() {
return None;
}
Some(CrateContext {
shared: self.shared,
- local: &self.shared.local_ccxs[index],
index: index,
+ local_ccxs: self.local_ccxs,
})
}
}
/// The iterator produced by `CrateContext::maybe_iter`.
pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: &'a [LocalCrateContext<'tcx>],
index: usize,
single: bool,
origin: usize,
type Item = (CrateContext<'a, 'tcx>, bool);
fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
- if self.index >= self.shared.local_ccxs.len() {
+ if self.index >= self.local_ccxs.len() {
return None;
}
let index = self.index;
self.index += 1;
if self.single {
- self.index = self.shared.local_ccxs.len();
+ self.index = self.local_ccxs.len();
}
let ccx = CrateContext {
shared: self.shared,
- local: &self.shared.local_ccxs[index],
index: index,
+ local_ccxs: self.local_ccxs
};
Some((ccx, index == self.origin))
}
}
impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
- pub fn new(crate_name: &str,
- local_count: usize,
- tcx: &'b TyCtxt<'tcx>,
+ pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>,
mir_map: &'b MirMap<'tcx>,
export_map: ExportMap,
symbol_hasher: Sha256,
// start) and then strongly recommending static linkage on MSVC!
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
- let mut shared_ccx = SharedCrateContext {
- local_ccxs: Vec::with_capacity(local_count),
+ SharedCrateContext {
metadata_llmod: metadata_llmod,
metadata_llcx: metadata_llcx,
export_map: export_map,
available_drop_glues: RefCell::new(FnvHashMap()),
use_dll_storage_attrs: use_dll_storage_attrs,
translation_items: RefCell::new(FnvHashMap()),
- };
-
- for i in 0..local_count {
- // Append ".rs" to crate name as LLVM module identifier.
- //
- // LLVM code generator emits a ".file filename" directive
- // for ELF backends. Value of the "filename" is set as the
- // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
- // crashes if the module identifier is same as other symbols
- // such as a function name in the module.
- // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
- let llmod_id = format!("{}.{}.rs", crate_name, i);
- let local_ccx = LocalCrateContext::new(&shared_ccx, &llmod_id[..]);
- shared_ccx.local_ccxs.push(local_ccx);
- }
-
- shared_ccx
- }
-
- pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> {
- CrateContextIterator {
- shared: self,
- index: 0,
+ trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
}
}
- pub fn get_ccx<'a>(&'a self, index: usize) -> CrateContext<'a, 'tcx> {
- CrateContext {
- shared: self,
- local: &self.local_ccxs[index],
- index: index,
- }
- }
-
- fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> {
- let (local_ccx, index) =
- self.local_ccxs
- .iter()
- .zip(0..self.local_ccxs.len())
- .min_by_key(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
- .unwrap();
- CrateContext {
- shared: self,
- local: local_ccx,
- index: index,
- }
- }
-
-
pub fn metadata_llmod(&self) -> ModuleRef {
self.metadata_llmod
}
&self.item_symbols
}
+ pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
+ &self.trait_cache
+ }
+
pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
&self.link_meta
}
- pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+ pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.tcx
}
pub fn use_dll_storage_attrs(&self) -> bool {
self.use_dll_storage_attrs
}
+
+ pub fn get_mir(&self, def_id: DefId) -> Option<CachedMir<'b, 'tcx>> {
+ if def_id.is_local() {
+ let node_id = self.tcx.map.as_local_node_id(def_id).unwrap();
+ self.mir_map.map.get(&node_id).map(CachedMir::Ref)
+ } else {
+ if let Some(mir) = self.mir_cache.borrow().get(&def_id).cloned() {
+ return Some(CachedMir::Owned(mir));
+ }
+
+ let mir = self.sess().cstore.maybe_get_item_mir(self.tcx, def_id);
+ let cached = mir.map(Rc::new);
+ if let Some(ref mir) = cached {
+ self.mir_cache.borrow_mut().insert(def_id, mir.clone());
+ }
+ cached.map(CachedMir::Owned)
+ }
+ }
+
+ pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> {
+ &self.translation_items
+ }
}
impl<'tcx> LocalCrateContext<'tcx> {
fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
- name: &str)
+ codegen_unit: CodegenUnit<'tcx>)
-> LocalCrateContext<'tcx> {
unsafe {
- let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
+ // Append ".rs" to LLVM module identifier.
+ //
+ // LLVM code generator emits a ".file filename" directive
+ // for ELF backends. Value of the "filename" is set as the
+ // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
+ // crashes if the module identifier is same as other symbols
+ // such as a function name in the module.
+ // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
+ let llmod_id = format!("{}.rs", codegen_unit.name);
+
+ let (llcx, llmod) = create_context_and_module(&shared.tcx.sess,
+ &llmod_id[..]);
let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
Some(debuginfo::CrateDebugContext::new(llmod))
None
};
- let mut local_ccx = LocalCrateContext {
+ let local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
+ codegen_unit: codegen_unit,
tn: TypeNames::new(),
needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()),
fn_pointer_shims: RefCell::new(FnvHashMap()),
intrinsics: RefCell::new(FnvHashMap()),
n_llvm_insns: Cell::new(0),
type_of_depth: Cell::new(0),
- trait_cache: RefCell::new(DepTrackingMap::new(shared.tcx
- .dep_graph
- .clone())),
};
- local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
- local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
-
- // Done mutating local_ccx directly. (The rest of the
- // initialization goes through RefCell.)
- {
- let ccx = local_ccx.dummy_ccx(shared);
+ let (int_type, opaque_vec_type, str_slice_ty, mut local_ccx) = {
+ // Do a little dance to create a dummy CrateContext, so we can
+ // create some things in the LLVM module of this codegen unit
+ let mut local_ccxs = vec![local_ccx];
+ let (int_type, opaque_vec_type, str_slice_ty) = {
+ let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
+ local_ccxs.as_mut_slice());
+ let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice");
+ str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx),
+ Type::int(&dummy_ccx)],
+ false);
+ (Type::int(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty)
+ };
+ (int_type, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap())
+ };
- let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
- str_slice_ty.set_struct_body(&[Type::i8p(&ccx), ccx.int_type()], false);
- ccx.tn().associate_type("str_slice", &str_slice_ty);
+ local_ccx.int_type = int_type;
+ local_ccx.opaque_vec_type = opaque_vec_type;
+ local_ccx.tn.associate_type("str_slice", &str_slice_ty);
- if ccx.sess().count_llvm_insns() {
- base::init_insn_ctxt()
- }
+ if shared.tcx.sess.count_llvm_insns() {
+ base::init_insn_ctxt()
}
local_ccx
/// Create a dummy `CrateContext` from `self` and the provided
/// `SharedCrateContext`. This is somewhat dangerous because `self` may
- /// not actually be an element of `shared.local_ccxs`, which can cause some
- /// operations to panic unexpectedly.
+ /// not be fully initialized.
///
/// This is used in the `LocalCrateContext` constructor to allow calling
/// functions that expect a complete `CrateContext`, even before the local
/// portion is fully initialized and attached to the `SharedCrateContext`.
- fn dummy_ccx<'a>(&'a self, shared: &'a SharedCrateContext<'a, 'tcx>)
+ fn dummy_ccx<'a>(shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: &'a [LocalCrateContext<'tcx>])
-> CrateContext<'a, 'tcx> {
+ assert!(local_ccxs.len() == 1);
CrateContext {
shared: shared,
- local: self,
- index: !0 as usize,
+ index: 0,
+ local_ccxs: local_ccxs
}
}
}
}
pub fn local(&self) -> &'b LocalCrateContext<'tcx> {
- self.local
+ &self.local_ccxs[self.index]
}
/// Get a (possibly) different `CrateContext` from the same
/// `SharedCrateContext`.
- pub fn rotate(&self) -> CrateContext<'b, 'tcx> {
- self.shared.get_smallest_ccx()
+ pub fn rotate(&'b self) -> CrateContext<'b, 'tcx> {
+ let (_, index) =
+ self.local_ccxs
+ .iter()
+ .zip(0..self.local_ccxs.len())
+ .min_by_key(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
+ .unwrap();
+ CrateContext {
+ shared: self.shared,
+ index: index,
+ local_ccxs: &self.local_ccxs[..],
+ }
}
/// Either iterate over only `self`, or iterate over all `CrateContext`s in
index: if iter_all { 0 } else { self.index },
single: !iter_all,
origin: self.index,
+ local_ccxs: self.local_ccxs,
}
}
-
- pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+ pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.shared.tcx
}
}
pub fn raw_builder<'a>(&'a self) -> BuilderRef {
- self.local.builder.b
+ self.local().builder.b
}
pub fn get_intrinsic(&self, key: &str) -> ValueRef {
}
pub fn llmod(&self) -> ModuleRef {
- self.local.llmod
+ self.local().llmod
}
pub fn llcx(&self) -> ContextRef {
- self.local.llcx
+ self.local().llcx
+ }
+
+ pub fn codegen_unit(&self) -> &CodegenUnit<'tcx> {
+ &self.local().codegen_unit
}
pub fn td(&self) -> llvm::TargetDataRef {
}
pub fn tn<'a>(&'a self) -> &'a TypeNames {
- &self.local.tn
+ &self.local().tn
}
pub fn export_map<'a>(&'a self) -> &'a ExportMap {
}
pub fn needs_unwind_cleanup_cache(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, bool>> {
- &self.local.needs_unwind_cleanup_cache
+ &self.local().needs_unwind_cleanup_cache
}
pub fn fn_pointer_shims(&self) -> &RefCell<FnvHashMap<Ty<'tcx>, ValueRef>> {
- &self.local.fn_pointer_shims
+ &self.local().fn_pointer_shims
}
pub fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>> {
- &self.local.drop_glues
+ &self.local().drop_glues
}
pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
- &self.local.external
+ &self.local().external
}
pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<DefId>> {
- &self.local.external_srcs
+ &self.local().external_srcs
}
pub fn instances<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
- &self.local.instances
+ &self.local().instances
}
pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
- &self.local.monomorphizing
+ &self.local().monomorphizing
}
pub fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>> {
- &self.local.vtables
+ &self.local().vtables
}
pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<InternedString, ValueRef>> {
- &self.local.const_cstr_cache
+ &self.local().const_cstr_cache
}
pub fn const_unsized<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
- &self.local.const_unsized
+ &self.local().const_unsized
}
pub fn const_globals<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
- &self.local.const_globals
+ &self.local().const_globals
}
pub fn const_values<'a>(&'a self) -> &'a RefCell<FnvHashMap<(ast::NodeId, &'tcx Substs<'tcx>),
ValueRef>> {
- &self.local.const_values
+ &self.local().const_values
}
pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
- &self.local.extern_const_values
+ &self.local().extern_const_values
}
pub fn statics<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, DefId>> {
- &self.local.statics
+ &self.local().statics
}
pub fn impl_method_cache<'a>(&'a self)
-> &'a RefCell<FnvHashMap<(DefId, ast::Name), DefId>> {
- &self.local.impl_method_cache
+ &self.local().impl_method_cache
}
pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<FnvHashMap<ValueRef, ValueRef>> {
- &self.local.closure_bare_wrapper_cache
+ &self.local().closure_bare_wrapper_cache
}
pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell<Vec<(ValueRef, ValueRef)>> {
- &self.local.statics_to_rauw
+ &self.local().statics_to_rauw
}
pub fn lltypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
- &self.local.lltypes
+ &self.local().lltypes
}
pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Type>> {
- &self.local.llsizingtypes
+ &self.local().llsizingtypes
}
pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, Rc<adt::Repr<'tcx>>>> {
- &self.local.adt_reprs
+ &self.local().adt_reprs
}
pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
}
pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>> {
- &self.local.type_hashcodes
+ &self.local().type_hashcodes
}
pub fn stats<'a>(&'a self) -> &'a Stats {
}
pub fn int_type(&self) -> Type {
- self.local.int_type
+ self.local().int_type
}
pub fn opaque_vec_type(&self) -> Type {
- self.local.opaque_vec_type
+ self.local().opaque_vec_type
}
pub fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
- &self.local.closure_vals
+ &self.local().closure_vals
}
pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
- &self.local.dbg_cx
+ &self.local().dbg_cx
}
pub fn eh_personality<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
- &self.local.eh_personality
+ &self.local().eh_personality
}
pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
- &self.local.eh_unwind_resume
+ &self.local().eh_unwind_resume
}
pub fn rust_try_fn<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
- &self.local.rust_try_fn
+ &self.local().rust_try_fn
}
fn intrinsics<'a>(&'a self) -> &'a RefCell<FnvHashMap<&'static str, ValueRef>> {
- &self.local.intrinsics
+ &self.local().intrinsics
}
pub fn count_llvm_insn(&self) {
- self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
- }
-
- pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
- &self.local.trait_cache
+ self.local().n_llvm_insns.set(self.local().n_llvm_insns.get() + 1);
}
pub fn obj_size_bound(&self) -> u64 {
}
pub fn enter_type_of(&self, ty: Ty<'tcx>) -> TypeOfDepthLock<'b, 'tcx> {
- let current_depth = self.local.type_of_depth.get();
+ let current_depth = self.local().type_of_depth.get();
debug!("enter_type_of({:?}) at depth {:?}", ty, current_depth);
if current_depth > self.sess().recursion_limit.get() {
self.sess().fatal(
&format!("overflow representing the type `{}`", ty))
}
- self.local.type_of_depth.set(current_depth + 1);
- TypeOfDepthLock(self.local)
+ self.local().type_of_depth.set(current_depth + 1);
+ TypeOfDepthLock(self.local())
}
pub fn check_overflow(&self) -> bool {
}
pub fn get_mir(&self, def_id: DefId) -> Option<CachedMir<'b, 'tcx>> {
- if def_id.is_local() {
- let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
- self.shared.mir_map.map.get(&node_id).map(CachedMir::Ref)
- } else {
- if let Some(mir) = self.shared.mir_cache.borrow().get(&def_id).cloned() {
- return Some(CachedMir::Owned(mir));
- }
-
- let mir = self.sess().cstore.maybe_get_item_mir(self.tcx(), def_id);
- let cached = mir.map(Rc::new);
- if let Some(ref mir) = cached {
- self.shared.mir_cache.borrow_mut().insert(def_id, mir.clone());
- }
- cached.map(CachedMir::Owned)
- }
+ self.shared.get_mir(def_id)
}
pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> {
* affine values (since they must never be duplicated).
*/
- assert!(!self.ty
- .moves_by_default(&bcx.tcx().empty_parameter_environment(), DUMMY_SP));
+ assert!(!self.ty.moves_by_default(bcx.tcx(),
+ &bcx.tcx().empty_parameter_environment(), DUMMY_SP));
self.shallow_copy_raw(bcx, dst)
}
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::hir::pat_util;
use rustc::ty::subst;
use rustc::hir::map as hir_map;
unique_type_id.push_str("struct ");
from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id);
},
- ty::TyTuple(ref component_types) if component_types.is_empty() => {
+ ty::TyTuple(component_types) if component_types.is_empty() => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
- ty::TyTuple(ref component_types) => {
+ ty::TyTuple(component_types) => {
unique_type_id.push_str("tuple ");
for &component_type in component_types {
let component_type_id =
unique_type_id.push_str(" fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
- let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+ let sig = cx.tcx().normalize_associated_type(&sig);
for ¶meter_type in &sig.inputs {
let parameter_type_id =
}
}
},
- ty::TyClosure(_, ref substs) if substs.upvar_tys.is_empty() => {
+ ty::TyClosure(_, substs) if substs.upvar_tys.is_empty() => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
- ty::TyClosure(_, ref substs) => {
+ ty::TyClosure(_, substs) => {
unique_type_id.push_str("closure ");
- for upvar_type in &substs.upvar_tys {
+ for upvar_type in substs.upvar_tys {
let upvar_type_id =
self.get_unique_type_id_of_type(cx, upvar_type);
let upvar_type_id =
use abi::Abi;
use common::{NodeIdAndSpan, CrateContext, FunctionContext, Block, BlockAndBuilder};
use monomorphize::Instance;
-use rustc::infer::normalize_associated_type;
use rustc::ty::{self, Ty};
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet};
}
if abi == Abi::RustCall && !sig.inputs.is_empty() {
- if let ty::TyTuple(ref args) = sig.inputs[sig.inputs.len() - 1].sty {
+ if let ty::TyTuple(args) = sig.inputs[sig.inputs.len() - 1].sty {
for &argument_type in args {
signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
}
name_to_append_suffix_to.push('<');
for (i, &actual_type) in actual_types.iter().enumerate() {
- let actual_type = normalize_associated_type(cx.tcx(), &actual_type);
+ let actual_type = cx.tcx().normalize_associated_type(&actual_type);
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx,
actual_type,
// Again, only create type information if full debuginfo is enabled
let template_params: Vec<_> = if cx.sess().opts.debuginfo == FullDebugInfo {
generics.types.as_slice().iter().enumerate().map(|(i, param)| {
- let actual_type = normalize_associated_type(cx.tcx(), &actual_types[i]);
+ let actual_type = cx.tcx().normalize_associated_type(&actual_types[i]);
let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP);
let name = CString::new(param.name.as_str().as_bytes()).unwrap();
unsafe {
use common::CrateContext;
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::ty::subst;
use rustc::ty::{self, Ty};
push_item_name(cx, def.did, qualified, output);
push_type_params(cx, substs, output);
},
- ty::TyTuple(ref component_types) => {
+ ty::TyTuple(component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
- let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+ let sig = cx.tcx().normalize_associated_type(&sig);
if !sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
//! * When in doubt, define.
use llvm::{self, ValueRef};
use rustc::ty;
-use rustc::infer;
use abi::{Abi, FnType};
use attributes;
use context::CrateContext;
debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
let abi = fn_type.fn_abi();
let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig());
- let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
+ let sig = ccx.tcx().normalize_associated_type(&sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(ccx, abi, &sig, &[]);
let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
assert!(target.kind.is_by_ref());
- let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);
+ let kind = custom_coerce_unsize_info(bcx.ccx().shared(),
+ source.ty,
+ target.ty);
let repr_source = adt::represent_type(bcx.ccx(), source.ty);
let src_fields = match &*repr_source {
base: &hir::Expr,
get_idx: F)
-> DatumBlock<'blk, 'tcx, Expr> where
- F: FnOnce(&'blk TyCtxt<'tcx>, &VariantInfo<'tcx>) -> usize,
+ F: FnOnce(TyCtxt<'blk, 'tcx, 'tcx>, &VariantInfo<'tcx>) -> usize,
{
let mut bcx = bcx;
let _icx = push_ctxt("trans_rec_field");
// the key we need to find the closure-kind and
// closure-type etc.
let (def_id, substs) = match expr_ty(bcx, expr).sty {
- ty::TyClosure(def_id, ref substs) => (def_id, substs),
+ ty::TyClosure(def_id, substs) => (def_id, substs),
ref t =>
span_bug!(
expr.span,
}
}
-pub fn cast_is_noop<'tcx>(tcx: &TyCtxt<'tcx>,
- expr: &hir::Expr,
- t_in: Ty<'tcx>,
- t_out: Ty<'tcx>)
- -> bool {
+pub fn cast_is_noop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ expr: &hir::Expr,
+ t_in: Ty<'tcx>,
+ t_out: Ty<'tcx>)
+ -> bool {
if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
return true;
}
let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
bcx.ccx().get_intrinsic(&name)
}
- fn to_intrinsic_name(&self, tcx: &TyCtxt, ty: Ty) -> &'static str {
+ fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint};
RvalueStmt
}
-fn expr_kind(tcx: &TyCtxt, expr: &hir::Expr) -> ExprKind {
+fn expr_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, expr: &hir::Expr) -> ExprKind {
if tcx.is_method_call(expr.id) {
// Overloaded operations are generally calls, and hence they are
// generated via DPS, but there are a few exceptions:
}
}
-pub fn type_needs_drop<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ty: Ty<'tcx>) -> bool {
tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
}
-pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
- let tcx = ccx.tcx();
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !type_is_sized(tcx, t) {
- return ccx.tcx().erase_regions(&t);
+ return tcx.erase_regions(&t);
}
// FIXME (#22815): note that type_needs_drop conservatively
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
- if !type_needs_drop(&tcx, t) {
+ if !type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
- ty::TyBox(typ) if !type_needs_drop(&tcx, typ)
+ ty::TyBox(typ) if !type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
- let llty = sizing_type_of(ccx, typ);
- // `Box<ZeroSizeType>` does not allocate.
- if llsize_of_alloc(ccx, llty) == 0 {
- tcx.types.i8
- } else {
- ccx.tcx().erase_regions(&t)
- }
+ tcx.normalizing_infer_ctxt(traits::ProjectionMode::Any).enter(|infcx| {
+ let layout = t.layout(&infcx).unwrap();
+ if layout.size(&tcx.data_layout).bytes() == 0 {
+ // `Box<ZeroSizeType>` does not allocate.
+ tcx.types.i8
+ } else {
+ tcx.erase_regions(&t)
+ }
+ })
}
- _ => ccx.tcx().erase_regions(&t)
+ _ => tcx.erase_regions(&t)
}
}
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
- let glue_type = get_drop_glue_type(ccx, t);
+ let glue_type = get_drop_glue_type(ccx.tcx(), t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
- let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
+ let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
def_id: tcx.lang_items.drop_trait().unwrap(),
substs: tcx.mk_substs(Substs::empty().with_self_ty(t))
});
- let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) {
+ let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data,
_ => bug!("dtor for {:?} is not an impl???", t)
};
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
- if collector::collecting_debug_information(bcx.ccx()) {
+ if collector::collecting_debug_information(bcx.ccx().shared()) {
bcx.ccx()
.record_translation_item_as_generated(TransItem::DropGlue(g));
}
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
-use rustc::infer;
use rustc::ty::subst;
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
let (def_id, substs, sig) = match callee_ty.sty {
ty::TyFnDef(def_id, substs, fty) => {
let sig = tcx.erase_late_bound_regions(&fty.sig);
- (def_id, substs, infer::normalize_associated_type(tcx, &sig))
+ (def_id, substs, tcx.normalize_associated_type(&sig))
}
_ => bug!("expected fn item type, found {}", callee_ty)
};
// We're generating an IR snippet that looks like:
//
// declare i32 @rust_try(%func, %data, %ptr) {
- // %slot = alloca i8*
- // call @llvm.localescape(%slot)
- // store %ptr, %slot
+ // %slot = alloca i64*
// invoke %func(%data) to label %normal unwind label %catchswitch
//
// normal:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
- // %tok = catchpad within %cs [%rust_try_filter]
+ // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
+ // %ptr[0] = %slot[0]
+ // %ptr[1] = %slot[1]
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
- // This structure follows the basic usage of the instructions in LLVM
- // (see their documentation/test cases for examples), but a
- // perhaps-surprising part here is the usage of the `localescape`
- // intrinsic. This is used to allow the filter function (also generated
- // here) to access variables on the stack of this intrinsic. This
- // ability enables us to transfer information about the exception being
- // thrown to this point, where we're catching the exception.
+ // This structure follows the basic usage of throw/try/catch in LLVM.
+ // For example, compile this C++ snippet to see what LLVM generates:
+ //
+ // #include <stdint.h>
+ //
+ // int bar(void (*foo)(void), uint64_t *ret) {
+ // try {
+ // foo();
+ // return 0;
+ // } catch(uint64_t a[2]) {
+ // ret[0] = a[0];
+ // ret[1] = a[1];
+ // return 1;
+ // }
+ // }
//
// More information can be found in libstd's seh.rs implementation.
- let slot = Alloca(bcx, Type::i8p(ccx), "slot");
- let localescape = ccx.get_intrinsic(&"llvm.localescape");
- Call(bcx, localescape, &[slot], dloc);
- Store(bcx, local_ptr, slot);
+ let i64p = Type::i64(ccx).ptr_to();
+ let slot = Alloca(bcx, i64p, "slot");
Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
Ret(normal, C_i32(ccx, 0), dloc);
let cs = CatchSwitch(catchswitch, None, None, 1);
AddHandler(catchswitch, cs, catchpad.llbb);
- let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
- let filter = BitCast(catchpad, filter, Type::i8p(ccx));
- let tok = CatchPad(catchpad, cs, &[filter]);
+ let tcx = ccx.tcx();
+ let tydesc = match tcx.lang_items.msvc_try_filter() {
+ Some(did) => ::consts::get_static(ccx, did).to_llref(),
+ None => bug!("msvc_try_filter not defined"),
+ };
+ let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
+ let addr = Load(catchpad, slot);
+ let arg1 = Load(catchpad, addr);
+ let val1 = C_i32(ccx, 1);
+ let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
+ let local_ptr = BitCast(catchpad, local_ptr, i64p);
+ Store(catchpad, arg1, local_ptr);
+ Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
CatchRet(catchpad, tok, caught.llbb);
Ret(caught, C_i32(ccx, 1), dloc);
};
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
- let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
+ let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(sig)
- });
+ }));
let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
let (fcx, block_arena);
block_arena = TypedArena::new();
// Define the type up front for the signature of the rust_try function.
let tcx = ccx.tcx();
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
- let fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
+ let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnOutput::FnConverging(tcx.mk_nil()),
variadic: false,
}),
- });
+ }));
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
ccx.rust_try_fn().set(Some(rust_try));
return rust_try
}
-// For MSVC-style exceptions (SEH), the compiler generates a filter function
-// which is used to determine whether an exception is being caught (e.g. if it's
-// a Rust exception or some other).
-//
-// This function is used to generate said filter function. The shim generated
-// here is actually just a thin wrapper to call the real implementation in the
-// standard library itself. For reasons as to why, see seh.rs in the standard
-// library.
-fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
- rust_try_fn: ValueRef)
- -> ValueRef {
- let ccx = fcx.ccx;
- let tcx = ccx.tcx();
- let dloc = DebugLoc::None;
-
- let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
- Some(did) => {
- Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
- }
- None => bug!("msvc_try_filter not defined"),
- };
-
- let output = ty::FnOutput::FnConverging(tcx.types.i32);
- let i8p = tcx.mk_mut_ptr(tcx.types.i8);
-
- let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
- let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
- let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
-
- // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
- // the base pointer, we follow the standard layout of:
- //
- // block:
- // %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
- // %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
- // %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
- // ret i32 %ret
- //
- // The recoverfp intrinsic is used to recover the frame pointer of the
- // `rust_try_fn` function, which is then in turn passed to the
- // `localrecover` intrinsic (pairing with the `localescape` intrinsic
- // mentioned above). Putting all this together means that we now have a
- // handle to the arguments passed into the `try` function, allowing writing
- // to the stack over there.
- //
- // For more info, see seh.rs in the standard library.
- let do_trans = |bcx: Block, ehptrs, base_pointer| {
- let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
- let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
- let arg = Call(bcx, localrecover,
- &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
- let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
- Ret(bcx, ret, dloc);
- };
-
- if ccx.tcx().sess.target.target.arch == "x86" {
- // On x86 the filter function doesn't actually receive any arguments.
- // Instead the %ebp register contains some contextual information.
- //
- // Unfortunately I don't know of any great documentation as to what's
- // going on here, all I can say is that there's a few tests cases in
- // LLVM's test suite which follow this pattern of instructions, so we
- // just do the same.
- gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
- let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
- let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
- let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
- do_trans(bcx, exn, ebp);
- })
- } else if ccx.tcx().sess.target.target.arch == "x86_64" {
- // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
- // are passed in as arguments to the filter function, so we just pass
- // those along.
- gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
- let exn = llvm::get_param(bcx.fcx.llfn, 0);
- let rbp = llvm::get_param(bcx.fcx.llfn, 1);
- do_trans(bcx, exn, rbp);
- })
- } else {
- bug!("unknown target to generate a filter function")
- }
-}
-
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!(a, b, E0511, "{}", c);
}
let tcx = bcx.tcx();
let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
- let sig = infer::normalize_associated_type(tcx, &sig);
+ let sig = tcx.normalize_associated_type(&sig);
let arg_tys = sig.inputs;
// every intrinsic takes a SIMD vector as its first argument
use back::symbol_names;
use llvm::{ValueRef, get_params};
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::ty::subst::{FnSpace, Subst, Substs};
use rustc::ty::subst;
use rustc::traits::{self, ProjectionMode};
method_ty);
let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig());
- let sig = infer::normalize_associated_type(tcx, &sig);
+ let sig = tcx.normalize_associated_type(&sig);
let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]);
let function_name =
// Not in the cache. Build it.
let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
- let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone());
+ let vtable = fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref.clone());
match vtable {
// Should default trait error here?
traits::VtableDefaultImpl(_) |
substs,
nested: _ }) => {
let nullptr = C_null(Type::nil(ccx).ptr_to());
- get_vtable_methods(ccx, id, substs)
+ get_vtable_methods(tcx, id, substs)
.into_iter()
.map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
vtable
}
-pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn get_vtable_methods<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_id: DefId,
substs: &'tcx subst::Substs<'tcx>)
-> Vec<Option<ImplMethod<'tcx>>>
{
- let tcx = ccx.tcx();
-
debug!("get_vtable_methods(impl_id={:?}, substs={:?}", impl_id, substs);
let trt_id = match tcx.impl_trait_ref(impl_id) {
let name = trait_method_type.name;
// Some methods cannot be called on an object; skip those.
- if !traits::is_vtable_safe_method(tcx, trt_id, &trait_method_type) {
+ if !tcx.is_vtable_safe_method(trt_id, &trait_method_type) {
debug!("get_vtable_methods: not vtable safe");
return None;
}
// try and trans it, in that case. Issue #23435.
if mth.is_provided {
let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs);
- if !normalize_and_test_predicates(ccx, predicates.into_vec()) {
+ if !normalize_and_test_predicates(tcx, predicates.into_vec()) {
debug!("get_vtable_methods: predicates do not hold");
return None;
}
}
/// Locates the applicable definition of a method, given its name.
-pub fn get_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_def_id: DefId,
- substs: &'tcx Substs<'tcx>,
- name: Name)
- -> ImplMethod<'tcx>
+pub fn get_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ impl_def_id: DefId,
+ substs: &'tcx Substs<'tcx>,
+ name: Name)
+ -> ImplMethod<'tcx>
{
assert!(!substs.types.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
- let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() {
Some(node_item) => {
+ let substs = tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
+ let substs = traits::translate_substs(&infcx, impl_def_id,
+ substs, node_item.node);
+ tcx.lift(&substs).unwrap_or_else(|| {
+ bug!("trans::meth::get_impl_method: translate_substs \
+ returned {:?} which contains inference types/regions",
+ substs);
+ })
+ });
ImplMethod {
method: node_item.item,
- substs: traits::translate_substs(&infcx, impl_def_id, substs, node_item.node),
+ substs: substs,
is_provided: node_item.node.is_from_trait(),
}
}
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
- let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
+ let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
let llvalue = if drop_ty != ty {
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
} else {
let extra_args = &args[sig.inputs.len()..];
let extra_args = extra_args.iter().map(|op_arg| {
- self.mir.operand_ty(bcx.tcx(), op_arg)
+ let op_ty = self.mir.operand_ty(bcx.tcx(), op_arg);
+ bcx.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
use rustc_const_eval::ErrKind;
use rustc_const_math::ConstInt::*;
use rustc::hir::def_id::DefId;
+use rustc::infer::TransNormalize;
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use rustc::traits;
let trait_id = trait_item.container().id();
let substs = instance.substs;
let trait_ref = ty::Binder(substs.to_trait_ref(ccx.tcx(), trait_id));
- let vtable = common::fulfill_obligation(ccx, DUMMY_SP, trait_ref);
+ let vtable = common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref);
if let traits::VtableImpl(vtable_impl) = vtable {
let name = ccx.tcx().item_name(instance.def);
for ac in ccx.tcx().associated_consts(vtable_impl.impl_def_id) {
}
fn monomorphize<T>(&self, value: &T) -> T
- where T : TypeFoldable<'tcx>
+ where T: TransNormalize<'tcx>
{
monomorphize::apply_param_substs(self.ccx.tcx(),
self.substs,
return Ok(Const::new(C_null(llty), ty));
}
- let substs = self.ccx.tcx().mk_substs(self.monomorphize(substs));
+ let substs = self.monomorphize(&substs);
let instance = Instance::new(def_id, substs);
MirConstContext::trans_def(self.ccx, instance, vec![])
}
span: DUMMY_SP
},
DUMMY_NODE_ID, def_id,
- &self.monomorphize(substs));
+ self.monomorphize(&substs));
}
let val = if let mir::AggregateKind::Adt(adt_def, index, _) = *kind {
return Const::new(C_null(llty), ty);
}
- let substs = bcx.tcx().mk_substs(bcx.monomorphize(substs));
+ let substs = bcx.monomorphize(&substs);
let instance = Instance::new(def_id, substs);
MirConstContext::trans_def(bcx.ccx(), instance, vec![])
}
use adt;
use base;
use builder::Builder;
-use common::{self, BlockAndBuilder, CrateContext, C_uint};
+use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef};
use consts;
use machine;
+use type_of::type_of;
use mir::drop;
-use llvm;
use Disr;
use std::ptr;
// Ergo, we return an undef ValueRef, so we do not have to special-case every
// place using lvalues, and could use it the same way you use a regular
// ReturnPointer LValue (i.e. store into it, load from it etc).
- let llty = fcx.fn_ty.ret.original_ty.ptr_to();
- unsafe {
- llvm::LLVMGetUndef(llty.to_ref())
- }
+ C_undef(fcx.fn_ty.ret.original_ty.ptr_to())
};
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
ret
}
TempRef::Operand(Some(_)) => {
- bug!("Lvalue temp already set");
+ let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
+ let lvalue_ty = bcx.monomorphize(&lvalue_ty);
+
+ // See comments in TempRef::new_operand as to why
+ // we always have Some in a ZST TempRef::Operand.
+ let ty = lvalue_ty.to_ty(bcx.tcx());
+ if common::type_is_zero_size(bcx.ccx(), ty) {
+ // Pass an undef pointer as no stores can actually occur.
+ let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to());
+ f(self, LvalueRef::new_sized(llptr, lvalue_ty))
+ } else {
+ bug!("Lvalue temp already set");
+ }
}
}
}
}
mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
+ let cast_ty = bcx.monomorphize(&cast_ty);
+
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
span: DUMMY_SP
},
DUMMY_NODE_ID, def_id,
- &bcx.monomorphize(substs));
+ bcx.monomorphize(&substs));
}
for (i, operand) in operands.iter().enumerate() {
use llvm::ValueRef;
use llvm;
use rustc::hir::def_id::DefId;
-use rustc::infer::normalize_associated_type;
+use rustc::infer::TransNormalize;
use rustc::ty::subst;
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
assert!(substs.regions.iter().all(|&r| r == ty::ReStatic));
Instance { def: def_id, substs: substs }
}
- pub fn mono(tcx: &TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
- Instance::new(def_id, &tcx.mk_substs(Substs::empty()))
+ pub fn mono<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Instance<'tcx> {
+ Instance::new(def_id, tcx.mk_substs(Substs::empty()))
}
}
/// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types.
-pub fn apply_param_substs<'tcx,T>(tcx: &TyCtxt<'tcx>,
- param_substs: &Substs<'tcx>,
- value: &T)
- -> T
- where T : TypeFoldable<'tcx>
+pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_substs: &Substs<'tcx>,
+ value: &T)
+ -> T
+ where T: TransNormalize<'tcx>
{
let substituted = value.subst(tcx, param_substs);
- normalize_associated_type(tcx, &substituted)
+ tcx.normalize_associated_type(&substituted)
}
/// Returns the normalized type of a struct field
-pub fn field_ty<'tcx>(tcx: &TyCtxt<'tcx>,
- param_substs: &Substs<'tcx>,
- f: ty::FieldDef<'tcx>)
- -> Ty<'tcx>
+pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_substs: &Substs<'tcx>,
+ f: ty::FieldDef<'tcx>)
+ -> Ty<'tcx>
{
- normalize_associated_type(tcx, &f.ty(tcx, param_substs))
+ tcx.normalize_associated_type(&f.ty(tcx, param_substs))
}
// Anything we can't find a proper codegen unit for goes into this.
const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit";
-pub fn partition<'tcx, I>(tcx: &TyCtxt<'tcx>,
- trans_items: I,
- strategy: PartitioningStrategy,
- reference_map: &ReferenceMap<'tcx>)
- -> Vec<CodegenUnit<'tcx>>
+pub fn partition<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trans_items: I,
+ strategy: PartitioningStrategy,
+ reference_map: &ReferenceMap<'tcx>)
+ -> Vec<CodegenUnit<'tcx>>
where I: Iterator<Item = TransItem<'tcx>>
{
// In the first step, we place all regular translation items into their
struct PostInliningPartitioning<'tcx>(Vec<CodegenUnit<'tcx>>);
struct PostDeclarationsPartitioning<'tcx>(Vec<CodegenUnit<'tcx>>);
-fn place_root_translation_items<'tcx, I>(tcx: &TyCtxt<'tcx>,
- trans_items: I)
- -> PreInliningPartitioning<'tcx>
+fn place_root_translation_items<'a, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trans_items: I)
+ -> PreInliningPartitioning<'tcx>
where I: Iterator<Item = TransItem<'tcx>>
{
let mut roots = FnvHashSet();
fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning<'tcx>,
target_cgu_count: usize,
crate_name: &str) {
- if target_cgu_count >= initial_partitioning.codegen_units.len() {
- return;
- }
-
assert!(target_cgu_count >= 1);
let codegen_units = &mut initial_partitioning.codegen_units;
}
for (index, cgu) in codegen_units.iter_mut().enumerate() {
- cgu.name = token::intern_and_get_ident(&format!("{}.{}", crate_name, index)[..]);
+ cgu.name = numbered_codegen_unit_name(crate_name, index);
+ }
+
+    // If the initial partitioning contained fewer than target_cgu_count units
+    // to begin with, we won't have enough codegen units here, so add empty
+    // units until we reach the target count.
+ while codegen_units.len() < target_cgu_count {
+ let index = codegen_units.len();
+ codegen_units.push(CodegenUnit {
+ name: numbered_codegen_unit_name(crate_name, index),
+ items: FnvHashMap()
+ });
+ }
+
+ fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString {
+ token::intern_and_get_ident(&format!("{}.{}", crate_name, index)[..])
}
}
PostDeclarationsPartitioning(codegen_units)
}
-fn characteristic_def_id_of_trans_item<'tcx>(tcx: &TyCtxt<'tcx>,
- trans_item: TransItem<'tcx>)
- -> Option<DefId> {
+fn characteristic_def_id_of_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trans_item: TransItem<'tcx>)
+ -> Option<DefId> {
match trans_item {
TransItem::Fn(instance) => {
// If this is a method, we want to put it into the same module as
}
}
-fn compute_codegen_unit_name<'tcx>(tcx: &TyCtxt<'tcx>,
- def_id: DefId,
- volatile: bool)
- -> InternedString {
+fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ volatile: bool)
+ -> InternedString {
// Unfortunately we cannot just use the `ty::item_path` infrastructure here
// because we need paths to modules and the DefIds of those are not
// available anymore for external items.
#![allow(non_camel_case_types)]
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::ty::subst;
use abi::FnType;
use adt;
cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
// FIXME(eddyb) Temporary sanity check for ty::layout.
- let infcx = infer::normalizing_infer_ctxt(cx.tcx(), &cx.tcx().tables, ProjectionMode::Any);
- match t.layout(&infcx) {
+ let layout = cx.tcx().normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
+ t.layout(&infcx)
+ });
+ match layout {
Ok(layout) => {
if !type_is_sized(cx.tcx(), t) {
if !layout.is_unsized() {
ty::TyFnDef(..) => Type::nil(cx),
ty::TyFnPtr(f) => {
let sig = cx.tcx().erase_late_bound_regions(&f.sig);
- let sig = infer::normalize_associated_type(cx.tcx(), &sig);
+ let sig = cx.tcx().normalize_associated_type(&sig);
FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to()
}
ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx),
//! case but `&a` in the second. Basically, defaults that appear inside
//! an rptr (`&r.T`) use the region `r` that appears in the rptr.
-use middle::astconv_util::{prim_ty_to_ty, prohibit_type_params, prohibit_projection};
use middle::const_val::ConstVal;
use rustc_const_eval::{eval_const_expr_partial, ConstEvalErr};
use rustc_const_eval::EvalHint::UncheckedExprHint;
use rustc::hir;
use rustc_back::slice;
-pub trait AstConv<'tcx> {
- fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx>;
+pub trait AstConv<'gcx, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>;
/// Identify the type scheme for an item with a type, like a type
/// alias, fn, or struct. This allows you to figure out the set of
/// are in scope into free ones. This function should only return Some
/// within a fn body.
/// See ParameterEnvironment::free_substs for more information.
- fn get_free_substs(&self) -> Option<&Substs<'tcx>> {
- None
- }
+ fn get_free_substs(&self) -> Option<&Substs<'tcx>>;
/// What type should we use when a type is omitted?
- fn ty_infer(&self,
- param_and_substs: Option<ty::TypeParameterDef<'tcx>>,
- substs: Option<&mut Substs<'tcx>>,
- space: Option<ParamSpace>,
- span: Span) -> Ty<'tcx>;
+ fn ty_infer(&self,
+ param_and_substs: Option<ty::TypeParameterDef<'tcx>>,
+ substs: Option<&mut Substs<'tcx>>,
+ space: Option<ParamSpace>,
+ span: Span) -> Ty<'tcx>;
/// Projecting an associated type from a (potentially)
/// higher-ranked trait reference is more complicated, because of
span: Span,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
item_name: ast::Name)
- -> Ty<'tcx>
- {
- if let Some(trait_ref) = self.tcx().no_late_bound_regions(&poly_trait_ref) {
- self.projected_ty(span, trait_ref, item_name)
- } else {
- // no late-bound regions, we can just ignore the binder
- span_err!(self.tcx().sess, span, E0212,
- "cannot extract an associated type from a higher-ranked trait bound \
- in this context");
- self.tcx().types.err
- }
- }
+ -> Ty<'tcx>;
/// Project an associated type from a non-higher-ranked trait reference.
/// This is fairly straightforward and can be accommodated in any context.
fn set_tainted_by_errors(&self);
}
-pub fn ast_region_to_region(tcx: &TyCtxt, lifetime: &hir::Lifetime)
+#[derive(PartialEq, Eq)]
+pub enum PathParamMode {
+ // Any path in a type context.
+ Explicit,
+ // The `module::Type` in `module::Type::method` in an expression.
+ Optional
+}
+
+struct ConvertedBinding<'tcx> {
+ item_name: ast::Name,
+ ty: Ty<'tcx>,
+ span: Span,
+}
+
+struct SelfInfo<'a, 'tcx> {
+ untransformed_self_ty: Ty<'tcx>,
+ explicit_self: &'a hir::ExplicitSelf,
+}
+
+type TraitAndProjections<'tcx> = (ty::PolyTraitRef<'tcx>, Vec<ty::PolyProjectionPredicate<'tcx>>);
+
+pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime)
-> ty::Region {
let r = match tcx.named_region_map.get(&lifetime.id) {
None => {
}
}
-pub fn opt_ast_region_to_region<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- default_span: Span,
- opt_lifetime: &Option<hir::Lifetime>) -> ty::Region
-{
- let r = match *opt_lifetime {
- Some(ref lifetime) => {
- ast_region_to_region(this.tcx(), lifetime)
- }
+impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o {
+ pub fn opt_ast_region_to_region(&self,
+ rscope: &RegionScope,
+ default_span: Span,
+ opt_lifetime: &Option<hir::Lifetime>) -> ty::Region
+ {
+ let r = match *opt_lifetime {
+ Some(ref lifetime) => {
+ ast_region_to_region(self.tcx(), lifetime)
+ }
- None => match rscope.anon_regions(default_span, 1) {
- Ok(rs) => rs[0],
- Err(params) => {
- let mut err = struct_span_err!(this.tcx().sess, default_span, E0106,
- "missing lifetime specifier");
- if let Some(params) = params {
- report_elision_failure(&mut err, params);
+ None => match rscope.anon_regions(default_span, 1) {
+ Ok(rs) => rs[0],
+ Err(params) => {
+ let mut err = struct_span_err!(self.tcx().sess, default_span, E0106,
+ "missing lifetime specifier");
+ if let Some(params) = params {
+ report_elision_failure(&mut err, params);
+ }
+ err.emit();
+ ty::ReStatic
}
- err.emit();
- ty::ReStatic
}
- }
- };
-
- debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}",
- opt_lifetime,
- r);
+ };
- r
-}
+ debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}",
+ opt_lifetime,
+ r);
-/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
-/// returns an appropriate set of substitutions for this particular reference to `I`.
-pub fn ast_path_substs_for_ty<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- decl_generics: &ty::Generics<'tcx>,
- item_segment: &hir::PathSegment)
- -> Substs<'tcx>
-{
- let tcx = this.tcx();
-
- // ast_path_substs() is only called to convert paths that are
- // known to refer to traits, types, or structs. In these cases,
- // all type parameters defined for the item being referenced will
- // be in the TypeSpace or SelfSpace.
- //
- // Note: in the case of traits, the self parameter is also
- // defined, but we don't currently create a `type_param_def` for
- // `Self` because it is implicit.
- assert!(decl_generics.regions.all(|d| d.space == TypeSpace));
- assert!(decl_generics.types.all(|d| d.space != FnSpace));
-
- let (regions, types, assoc_bindings) = match item_segment.parameters {
- hir::AngleBracketedParameters(ref data) => {
- convert_angle_bracketed_parameters(this, rscope, span, decl_generics, data)
- }
- hir::ParenthesizedParameters(..) => {
- span_err!(tcx.sess, span, E0214,
- "parenthesized parameters may only be used with a trait");
- let ty_param_defs = decl_generics.types.get_slice(TypeSpace);
- (Substs::empty(),
- ty_param_defs.iter().map(|_| tcx.types.err).collect(),
- vec![])
- }
- };
+ r
+ }
- prohibit_projections(this.tcx(), &assoc_bindings);
+ /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
+ /// returns an appropriate set of substitutions for this particular reference to `I`.
+ pub fn ast_path_substs_for_ty(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ decl_generics: &ty::Generics<'tcx>,
+ item_segment: &hir::PathSegment)
+ -> Substs<'tcx>
+ {
+ let tcx = self.tcx();
+
+ // ast_path_substs() is only called to convert paths that are
+ // known to refer to traits, types, or structs. In these cases,
+ // all type parameters defined for the item being referenced will
+ // be in the TypeSpace or SelfSpace.
+ //
+ // Note: in the case of traits, the self parameter is also
+ // defined, but we don't currently create a `type_param_def` for
+ // `Self` because it is implicit.
+ assert!(decl_generics.regions.all(|d| d.space == TypeSpace));
+ assert!(decl_generics.types.all(|d| d.space != FnSpace));
+
+ let (regions, types, assoc_bindings) = match item_segment.parameters {
+ hir::AngleBracketedParameters(ref data) => {
+ self.convert_angle_bracketed_parameters(rscope, span, decl_generics, data)
+ }
+ hir::ParenthesizedParameters(..) => {
+ span_err!(tcx.sess, span, E0214,
+ "parenthesized parameters may only be used with a trait");
+ let ty_param_defs = decl_generics.types.get_slice(TypeSpace);
+ (Substs::empty(),
+ ty_param_defs.iter().map(|_| tcx.types.err).collect(),
+ vec![])
+ }
+ };
- create_substs_for_ast_path(this,
- span,
- param_mode,
- decl_generics,
- None,
- types,
- regions)
-}
+ assoc_bindings.first().map(|b| self.tcx().prohibit_projection(b.span));
-#[derive(PartialEq, Eq)]
-pub enum PathParamMode {
- // Any path in a type context.
- Explicit,
- // The `module::Type` in `module::Type::method` in an expression.
- Optional
-}
+ self.create_substs_for_ast_path(span,
+ param_mode,
+ decl_generics,
+ None,
+ types,
+ regions)
+ }
-fn create_region_substs<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- decl_generics: &ty::Generics<'tcx>,
- regions_provided: Vec<ty::Region>)
- -> Substs<'tcx>
-{
- let tcx = this.tcx();
-
- // If the type is parameterized by this region, then replace this
- // region with the current anon region binding (in other words,
- // whatever & would get replaced with).
- let expected_num_region_params = decl_generics.regions.len(TypeSpace);
- let supplied_num_region_params = regions_provided.len();
- let regions = if expected_num_region_params == supplied_num_region_params {
- regions_provided
- } else {
- let anon_regions =
- rscope.anon_regions(span, expected_num_region_params);
+ fn create_region_substs(&self,
+ rscope: &RegionScope,
+ span: Span,
+ decl_generics: &ty::Generics<'tcx>,
+ regions_provided: Vec<ty::Region>)
+ -> Substs<'tcx>
+ {
+ let tcx = self.tcx();
+
+ // If the type is parameterized by this region, then replace this
+ // region with the current anon region binding (in other words,
+ // whatever & would get replaced with).
+ let expected_num_region_params = decl_generics.regions.len(TypeSpace);
+ let supplied_num_region_params = regions_provided.len();
+ let regions = if expected_num_region_params == supplied_num_region_params {
+ regions_provided
+ } else {
+ let anon_regions =
+ rscope.anon_regions(span, expected_num_region_params);
- if supplied_num_region_params != 0 || anon_regions.is_err() {
- report_lifetime_number_error(tcx, span,
- supplied_num_region_params,
- expected_num_region_params);
- }
+ if supplied_num_region_params != 0 || anon_regions.is_err() {
+ report_lifetime_number_error(tcx, span,
+ supplied_num_region_params,
+ expected_num_region_params);
+ }
- match anon_regions {
- Ok(anon_regions) => anon_regions,
- Err(_) => (0..expected_num_region_params).map(|_| ty::ReStatic).collect()
- }
- };
- Substs::new_type(vec![], regions)
-}
+ match anon_regions {
+ Ok(anon_regions) => anon_regions,
+ Err(_) => (0..expected_num_region_params).map(|_| ty::ReStatic).collect()
+ }
+ };
+ Substs::new_type(vec![], regions)
+ }
-/// Given the type/region arguments provided to some path (along with
-/// an implicit Self, if this is a trait reference) returns the complete
-/// set of substitutions. This may involve applying defaulted type parameters.
-///
-/// Note that the type listing given here is *exactly* what the user provided.
-///
-/// The `region_substs` should be the result of `create_region_substs`
-/// -- that is, a substitution with no types but the correct number of
-/// regions.
-fn create_substs_for_ast_path<'tcx>(
- this: &AstConv<'tcx>,
- span: Span,
- param_mode: PathParamMode,
- decl_generics: &ty::Generics<'tcx>,
- self_ty: Option<Ty<'tcx>>,
- types_provided: Vec<Ty<'tcx>>,
- region_substs: Substs<'tcx>)
- -> Substs<'tcx>
-{
- let tcx = this.tcx();
-
- debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}, \
- types_provided={:?}, region_substs={:?})",
- decl_generics, self_ty, types_provided,
- region_substs);
-
- assert_eq!(region_substs.regions.len(TypeSpace), decl_generics.regions.len(TypeSpace));
- assert!(region_substs.types.is_empty());
-
- // Convert the type parameters supplied by the user.
- let ty_param_defs = decl_generics.types.get_slice(TypeSpace);
- let formal_ty_param_count = ty_param_defs.len();
- let required_ty_param_count = ty_param_defs.iter()
- .take_while(|x| x.default.is_none())
- .count();
-
- let mut type_substs = get_type_substs_for_defs(this,
- span,
- types_provided,
- param_mode,
- ty_param_defs,
- region_substs.clone(),
- self_ty);
-
- let supplied_ty_param_count = type_substs.len();
- check_type_argument_count(this.tcx(), span, supplied_ty_param_count,
- required_ty_param_count, formal_ty_param_count);
-
- if supplied_ty_param_count < required_ty_param_count {
- while type_substs.len() < required_ty_param_count {
- type_substs.push(tcx.types.err);
+ /// Given the type/region arguments provided to some path (along with
+ /// an implicit Self, if this is a trait reference) returns the complete
+ /// set of substitutions. This may involve applying defaulted type parameters.
+ ///
+ /// Note that the type listing given here is *exactly* what the user provided.
+ ///
+ /// The `region_substs` should be the result of `create_region_substs`
+ /// -- that is, a substitution with no types but the correct number of
+ /// regions.
+ fn create_substs_for_ast_path(&self,
+ span: Span,
+ param_mode: PathParamMode,
+ decl_generics: &ty::Generics<'tcx>,
+ self_ty: Option<Ty<'tcx>>,
+ types_provided: Vec<Ty<'tcx>>,
+ region_substs: Substs<'tcx>)
+ -> Substs<'tcx>
+ {
+ let tcx = self.tcx();
+
+ debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}, \
+ types_provided={:?}, region_substs={:?})",
+ decl_generics, self_ty, types_provided,
+ region_substs);
+
+ assert_eq!(region_substs.regions.len(TypeSpace), decl_generics.regions.len(TypeSpace));
+ assert!(region_substs.types.is_empty());
+
+ // Convert the type parameters supplied by the user.
+ let ty_param_defs = decl_generics.types.get_slice(TypeSpace);
+ let formal_ty_param_count = ty_param_defs.len();
+ let required_ty_param_count = ty_param_defs.iter()
+ .take_while(|x| x.default.is_none())
+ .count();
+
+ let mut type_substs = self.get_type_substs_for_defs(span,
+ types_provided,
+ param_mode,
+ ty_param_defs,
+ region_substs.clone(),
+ self_ty);
+
+ let supplied_ty_param_count = type_substs.len();
+ check_type_argument_count(self.tcx(), span, supplied_ty_param_count,
+ required_ty_param_count, formal_ty_param_count);
+
+ if supplied_ty_param_count < required_ty_param_count {
+ while type_substs.len() < required_ty_param_count {
+ type_substs.push(tcx.types.err);
+ }
+ } else if supplied_ty_param_count > formal_ty_param_count {
+ type_substs.truncate(formal_ty_param_count);
}
- } else if supplied_ty_param_count > formal_ty_param_count {
- type_substs.truncate(formal_ty_param_count);
- }
- assert!(type_substs.len() >= required_ty_param_count &&
- type_substs.len() <= formal_ty_param_count);
+ assert!(type_substs.len() >= required_ty_param_count &&
+ type_substs.len() <= formal_ty_param_count);
- let mut substs = region_substs;
- substs.types.extend(TypeSpace, type_substs.into_iter());
+ let mut substs = region_substs;
+ substs.types.extend(TypeSpace, type_substs.into_iter());
- match self_ty {
- None => {
- // If no self-type is provided, it's still possible that
- // one was declared, because this could be an object type.
- }
- Some(ty) => {
- // If a self-type is provided, one should have been
- // "declared" (in other words, this should be a
- // trait-ref).
- assert!(decl_generics.types.get_self().is_some());
- substs.types.push(SelfSpace, ty);
+ match self_ty {
+ None => {
+ // If no self-type is provided, it's still possible that
+ // one was declared, because this could be an object type.
+ }
+ Some(ty) => {
+ // If a self-type is provided, one should have been
+ // "declared" (in other words, this should be a
+ // trait-ref).
+ assert!(decl_generics.types.get_self().is_some());
+ substs.types.push(SelfSpace, ty);
+ }
}
- }
- let actual_supplied_ty_param_count = substs.types.len(TypeSpace);
- for param in &ty_param_defs[actual_supplied_ty_param_count..] {
- if let Some(default) = param.default {
- // If we are converting an object type, then the
- // `Self` parameter is unknown. However, some of the
- // other type parameters may reference `Self` in their
- // defaults. This will lead to an ICE if we are not
- // careful!
- if self_ty.is_none() && default.has_self_ty() {
- span_err!(tcx.sess, span, E0393,
- "the type parameter `{}` must be explicitly specified \
- in an object type because its default value `{}` references \
- the type `Self`",
- param.name,
- default);
- substs.types.push(TypeSpace, tcx.types.err);
+ let actual_supplied_ty_param_count = substs.types.len(TypeSpace);
+ for param in &ty_param_defs[actual_supplied_ty_param_count..] {
+ if let Some(default) = param.default {
+ // If we are converting an object type, then the
+ // `Self` parameter is unknown. However, some of the
+ // other type parameters may reference `Self` in their
+ // defaults. This will lead to an ICE if we are not
+ // careful!
+ if self_ty.is_none() && default.has_self_ty() {
+ span_err!(tcx.sess, span, E0393,
+ "the type parameter `{}` must be explicitly specified \
+ in an object type because its default value `{}` references \
+ the type `Self`",
+ param.name,
+ default);
+ substs.types.push(TypeSpace, tcx.types.err);
+ } else {
+ // This is a default type parameter.
+ let default = default.subst_spanned(tcx,
+ &substs,
+ Some(span));
+ substs.types.push(TypeSpace, default);
+ }
} else {
- // This is a default type parameter.
- let default = default.subst_spanned(tcx,
- &substs,
- Some(span));
- substs.types.push(TypeSpace, default);
+ span_bug!(span, "extra parameter without default");
}
- } else {
- span_bug!(span, "extra parameter without default");
}
- }
- debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}",
- decl_generics, self_ty, substs);
+ debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}",
+ decl_generics, self_ty, substs);
- substs
-}
+ substs
+ }
-/// Returns types_provided if it is not empty, otherwise populating the
-/// type parameters with inference variables as appropriate.
-fn get_type_substs_for_defs<'tcx>(this: &AstConv<'tcx>,
- span: Span,
- types_provided: Vec<Ty<'tcx>>,
- param_mode: PathParamMode,
- ty_param_defs: &[ty::TypeParameterDef<'tcx>],
- mut substs: Substs<'tcx>,
- self_ty: Option<Ty<'tcx>>)
- -> Vec<Ty<'tcx>>
-{
- fn default_type_parameter<'tcx>(p: &ty::TypeParameterDef<'tcx>, self_ty: Option<Ty<'tcx>>)
- -> Option<ty::TypeParameterDef<'tcx>>
+ /// Returns types_provided if it is not empty, otherwise populating the
+ /// type parameters with inference variables as appropriate.
+ fn get_type_substs_for_defs(&self,
+ span: Span,
+ types_provided: Vec<Ty<'tcx>>,
+ param_mode: PathParamMode,
+ ty_param_defs: &[ty::TypeParameterDef<'tcx>],
+ mut substs: Substs<'tcx>,
+ self_ty: Option<Ty<'tcx>>)
+ -> Vec<Ty<'tcx>>
{
- if let Some(ref default) = p.default {
- if self_ty.is_none() && default.has_self_ty() {
- // There is no suitable inference default for a type parameter
- // that references self with no self-type provided.
- return None;
+ fn default_type_parameter<'tcx>(p: &ty::TypeParameterDef<'tcx>, self_ty: Option<Ty<'tcx>>)
+ -> Option<ty::TypeParameterDef<'tcx>>
+ {
+ if let Some(ref default) = p.default {
+ if self_ty.is_none() && default.has_self_ty() {
+ // There is no suitable inference default for a type parameter
+ // that references self with no self-type provided.
+ return None;
+ }
}
+
+ Some(p.clone())
}
- Some(p.clone())
+ if param_mode == PathParamMode::Optional && types_provided.is_empty() {
+ ty_param_defs
+ .iter()
+ .map(|p| self.ty_infer(default_type_parameter(p, self_ty), Some(&mut substs),
+ Some(TypeSpace), span))
+ .collect()
+ } else {
+ types_provided
+ }
}
- if param_mode == PathParamMode::Optional && types_provided.is_empty() {
- ty_param_defs
- .iter()
- .map(|p| this.ty_infer(default_type_parameter(p, self_ty), Some(&mut substs),
- Some(TypeSpace), span))
- .collect()
- } else {
- types_provided
+ fn convert_angle_bracketed_parameters(&self,
+ rscope: &RegionScope,
+ span: Span,
+ decl_generics: &ty::Generics<'tcx>,
+ data: &hir::AngleBracketedParameterData)
+ -> (Substs<'tcx>,
+ Vec<Ty<'tcx>>,
+ Vec<ConvertedBinding<'tcx>>)
+ {
+ let regions: Vec<_> =
+ data.lifetimes.iter()
+ .map(|l| ast_region_to_region(self.tcx(), l))
+ .collect();
+
+ let region_substs =
+ self.create_region_substs(rscope, span, decl_generics, regions);
+
+ let types: Vec<_> =
+ data.types.iter()
+ .enumerate()
+ .map(|(i,t)| self.ast_ty_arg_to_ty(rscope, decl_generics,
+ i, ®ion_substs, t))
+ .collect();
+
+ let assoc_bindings: Vec<_> =
+ data.bindings.iter()
+ .map(|b| ConvertedBinding { item_name: b.name,
+ ty: self.ast_ty_to_ty(rscope, &b.ty),
+ span: b.span })
+ .collect();
+
+ (region_substs, types, assoc_bindings)
}
-}
-struct ConvertedBinding<'tcx> {
- item_name: ast::Name,
- ty: Ty<'tcx>,
- span: Span,
-}
+ /// Returns the appropriate lifetime to use for any output lifetimes
+ /// (if one exists) and a vector of the (pattern, number of lifetimes)
+ /// corresponding to each input type/pattern.
+ fn find_implied_output_region(&self,
+ input_tys: &[Ty<'tcx>],
+ input_pats: Vec<String>) -> ElidedLifetime
+ {
+ let tcx = self.tcx();
+ let mut lifetimes_for_params = Vec::new();
+ let mut possible_implied_output_region = None;
+
+ for (input_type, input_pat) in input_tys.iter().zip(input_pats) {
+ let mut regions = FnvHashSet();
+ let have_bound_regions = tcx.collect_regions(input_type, &mut regions);
+
+ debug!("find_implied_output_regions: collected {:?} from {:?} \
+ have_bound_regions={:?}", ®ions, input_type, have_bound_regions);
+
+ if regions.len() == 1 {
+ // there's a chance that the unique lifetime of this
+ // iteration will be the appropriate lifetime for output
+            // parameters, so let's store it.
+ possible_implied_output_region = regions.iter().cloned().next();
+ }
-fn convert_angle_bracketed_parameters<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- decl_generics: &ty::Generics<'tcx>,
- data: &hir::AngleBracketedParameterData)
- -> (Substs<'tcx>,
- Vec<Ty<'tcx>>,
- Vec<ConvertedBinding<'tcx>>)
-{
- let regions: Vec<_> =
- data.lifetimes.iter()
- .map(|l| ast_region_to_region(this.tcx(), l))
- .collect();
+ lifetimes_for_params.push(ElisionFailureInfo {
+ name: input_pat,
+ lifetime_count: regions.len(),
+ have_bound_regions: have_bound_regions
+ });
+ }
- let region_substs =
- create_region_substs(this, rscope, span, decl_generics, regions);
+ if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::<usize>() == 1 {
+ Ok(possible_implied_output_region.unwrap())
+ } else {
+ Err(Some(lifetimes_for_params))
+ }
+ }
- let types: Vec<_> =
- data.types.iter()
- .enumerate()
- .map(|(i,t)| ast_ty_arg_to_ty(this, rscope, decl_generics,
- i, ®ion_substs, t))
- .collect();
+ fn convert_ty_with_lifetime_elision(&self,
+ elided_lifetime: ElidedLifetime,
+ ty: &hir::Ty)
+ -> Ty<'tcx>
+ {
+ match elided_lifetime {
+ Ok(implied_output_region) => {
+ let rb = ElidableRscope::new(implied_output_region);
+ self.ast_ty_to_ty(&rb, ty)
+ }
+ Err(param_lifetimes) => {
+ // All regions must be explicitly specified in the output
+ // if the lifetime elision rules do not apply. This saves
+ // the user from potentially-confusing errors.
+ let rb = UnelidableRscope::new(param_lifetimes);
+ self.ast_ty_to_ty(&rb, ty)
+ }
+ }
+ }
- let assoc_bindings: Vec<_> =
- data.bindings.iter()
- .map(|b| ConvertedBinding { item_name: b.name,
- ty: ast_ty_to_ty(this, rscope, &b.ty),
- span: b.span })
- .collect();
+ fn convert_parenthesized_parameters(&self,
+ rscope: &RegionScope,
+ span: Span,
+ decl_generics: &ty::Generics<'tcx>,
+ data: &hir::ParenthesizedParameterData)
+ -> (Substs<'tcx>,
+ Vec<Ty<'tcx>>,
+ Vec<ConvertedBinding<'tcx>>)
+ {
+ let region_substs =
+ self.create_region_substs(rscope, span, decl_generics, Vec::new());
- (region_substs, types, assoc_bindings)
-}
+ let binding_rscope = BindingRscope::new();
+ let inputs =
+ data.inputs.iter()
+ .map(|a_t| self.ast_ty_arg_to_ty(&binding_rscope, decl_generics,
+ 0, ®ion_substs, a_t))
+ .collect::<Vec<Ty<'tcx>>>();
-/// Returns the appropriate lifetime to use for any output lifetimes
-/// (if one exists) and a vector of the (pattern, number of lifetimes)
-/// corresponding to each input type/pattern.
-fn find_implied_output_region<'tcx>(tcx: &TyCtxt<'tcx>,
- input_tys: &[Ty<'tcx>],
- input_pats: Vec<String>) -> ElidedLifetime
-{
- let mut lifetimes_for_params = Vec::new();
- let mut possible_implied_output_region = None;
+ let input_params = vec![String::new(); inputs.len()];
+ let implied_output_region = self.find_implied_output_region(&inputs, input_params);
- for (input_type, input_pat) in input_tys.iter().zip(input_pats) {
- let mut regions = FnvHashSet();
- let have_bound_regions = tcx.collect_regions(input_type, &mut regions);
+ let input_ty = self.tcx().mk_tup(inputs);
- debug!("find_implied_output_regions: collected {:?} from {:?} \
- have_bound_regions={:?}", ®ions, input_type, have_bound_regions);
+ let (output, output_span) = match data.output {
+ Some(ref output_ty) => {
+ (self.convert_ty_with_lifetime_elision(implied_output_region, &output_ty),
+ output_ty.span)
+ }
+ None => {
+ (self.tcx().mk_nil(), data.span)
+ }
+ };
- if regions.len() == 1 {
- // there's a chance that the unique lifetime of this
- // iteration will be the appropriate lifetime for output
- // parameters, so lets store it.
- possible_implied_output_region = regions.iter().cloned().next();
- }
+ let output_binding = ConvertedBinding {
+ item_name: token::intern(FN_OUTPUT_NAME),
+ ty: output,
+ span: output_span
+ };
- lifetimes_for_params.push(ElisionFailureInfo {
- name: input_pat,
- lifetime_count: regions.len(),
- have_bound_regions: have_bound_regions
- });
+ (region_substs, vec![input_ty], vec![output_binding])
}
- if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::<usize>() == 1 {
- Ok(possible_implied_output_region.unwrap())
- } else {
- Err(Some(lifetimes_for_params))
+ pub fn instantiate_poly_trait_ref(&self,
+ rscope: &RegionScope,
+ ast_trait_ref: &hir::PolyTraitRef,
+ self_ty: Option<Ty<'tcx>>,
+ poly_projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
+ -> ty::PolyTraitRef<'tcx>
+ {
+ let trait_ref = &ast_trait_ref.trait_ref;
+ let trait_def_id = self.trait_def_id(trait_ref);
+ self.ast_path_to_poly_trait_ref(rscope,
+ trait_ref.path.span,
+ PathParamMode::Explicit,
+ trait_def_id,
+ self_ty,
+ trait_ref.path.segments.last().unwrap(),
+ poly_projections)
}
-}
-fn convert_ty_with_lifetime_elision<'tcx>(this: &AstConv<'tcx>,
- elided_lifetime: ElidedLifetime,
- ty: &hir::Ty)
- -> Ty<'tcx>
-{
- match elided_lifetime {
- Ok(implied_output_region) => {
- let rb = ElidableRscope::new(implied_output_region);
- ast_ty_to_ty(this, &rb, ty)
- }
- Err(param_lifetimes) => {
- // All regions must be explicitly specified in the output
- // if the lifetime elision rules do not apply. This saves
- // the user from potentially-confusing errors.
- let rb = UnelidableRscope::new(param_lifetimes);
- ast_ty_to_ty(this, &rb, ty)
- }
+ /// Instantiates the path for the given trait reference, assuming that it's
+ /// bound to a valid trait type. Returns the def_id for the defining trait.
+ /// Fails if the type is a type other than a trait type.
+ ///
+ /// If the `projections` argument is `None`, then assoc type bindings like `Foo<T=X>`
+ /// are disallowed. Otherwise, they are pushed onto the vector given.
+ pub fn instantiate_mono_trait_ref(&self,
+ rscope: &RegionScope,
+ trait_ref: &hir::TraitRef,
+ self_ty: Option<Ty<'tcx>>)
+ -> ty::TraitRef<'tcx>
+ {
+ let trait_def_id = self.trait_def_id(trait_ref);
+ self.ast_path_to_mono_trait_ref(rscope,
+ trait_ref.path.span,
+ PathParamMode::Explicit,
+ trait_def_id,
+ self_ty,
+ trait_ref.path.segments.last().unwrap())
}
-}
-
-fn convert_parenthesized_parameters<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- decl_generics: &ty::Generics<'tcx>,
- data: &hir::ParenthesizedParameterData)
- -> (Substs<'tcx>,
- Vec<Ty<'tcx>>,
- Vec<ConvertedBinding<'tcx>>)
-{
- let region_substs =
- create_region_substs(this, rscope, span, decl_generics, Vec::new());
-
- let binding_rscope = BindingRscope::new();
- let inputs =
- data.inputs.iter()
- .map(|a_t| ast_ty_arg_to_ty(this, &binding_rscope, decl_generics,
- 0, ®ion_substs, a_t))
- .collect::<Vec<Ty<'tcx>>>();
-
- let input_params = vec![String::new(); inputs.len()];
- let implied_output_region = find_implied_output_region(this.tcx(), &inputs, input_params);
-
- let input_ty = this.tcx().mk_tup(inputs);
-
- let (output, output_span) = match data.output {
- Some(ref output_ty) => {
- (convert_ty_with_lifetime_elision(this,
- implied_output_region,
- &output_ty),
- output_ty.span)
- }
- None => {
- (this.tcx().mk_nil(), data.span)
- }
- };
-
- let output_binding = ConvertedBinding {
- item_name: token::intern(FN_OUTPUT_NAME),
- ty: output,
- span: output_span
- };
-
- (region_substs, vec![input_ty], vec![output_binding])
-}
-pub fn instantiate_poly_trait_ref<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- ast_trait_ref: &hir::PolyTraitRef,
- self_ty: Option<Ty<'tcx>>,
- poly_projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
- -> ty::PolyTraitRef<'tcx>
-{
- let trait_ref = &ast_trait_ref.trait_ref;
- let trait_def_id = trait_def_id(this, trait_ref);
- ast_path_to_poly_trait_ref(this,
- rscope,
- trait_ref.path.span,
- PathParamMode::Explicit,
- trait_def_id,
- self_ty,
- trait_ref.path.segments.last().unwrap(),
- poly_projections)
-}
-
-/// Instantiates the path for the given trait reference, assuming that it's
-/// bound to a valid trait type. Returns the def_id for the defining trait.
-/// Fails if the type is a type other than a trait type.
-///
-/// If the `projections` argument is `None`, then assoc type bindings like `Foo<T=X>`
-/// are disallowed. Otherwise, they are pushed onto the vector given.
-pub fn instantiate_mono_trait_ref<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- trait_ref: &hir::TraitRef,
- self_ty: Option<Ty<'tcx>>)
- -> ty::TraitRef<'tcx>
-{
- let trait_def_id = trait_def_id(this, trait_ref);
- ast_path_to_mono_trait_ref(this,
- rscope,
- trait_ref.path.span,
- PathParamMode::Explicit,
- trait_def_id,
- self_ty,
- trait_ref.path.segments.last().unwrap())
-}
-
-fn trait_def_id<'tcx>(this: &AstConv<'tcx>, trait_ref: &hir::TraitRef) -> DefId {
- let path = &trait_ref.path;
- match ::lookup_full_def(this.tcx(), path.span, trait_ref.ref_id) {
- Def::Trait(trait_def_id) => trait_def_id,
- Def::Err => {
- this.tcx().sess.fatal("cannot continue compilation due to previous error");
- }
- _ => {
- span_fatal!(this.tcx().sess, path.span, E0245, "`{}` is not a trait",
- path);
+ fn trait_def_id(&self, trait_ref: &hir::TraitRef) -> DefId {
+ let path = &trait_ref.path;
+ match ::lookup_full_def(self.tcx(), path.span, trait_ref.ref_id) {
+ Def::Trait(trait_def_id) => trait_def_id,
+ Def::Err => {
+ self.tcx().sess.fatal("cannot continue compilation due to previous error");
+ }
+ _ => {
+ span_fatal!(self.tcx().sess, path.span, E0245, "`{}` is not a trait",
+ path);
+ }
}
}
-}
-
-fn object_path_to_poly_trait_ref<'a,'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- trait_def_id: DefId,
- trait_segment: &hir::PathSegment,
- mut projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
- -> ty::PolyTraitRef<'tcx>
-{
- ast_path_to_poly_trait_ref(this,
- rscope,
- span,
- param_mode,
- trait_def_id,
- None,
- trait_segment,
- projections)
-}
-fn ast_path_to_poly_trait_ref<'a,'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- trait_def_id: DefId,
- self_ty: Option<Ty<'tcx>>,
- trait_segment: &hir::PathSegment,
- poly_projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
- -> ty::PolyTraitRef<'tcx>
-{
- debug!("ast_path_to_poly_trait_ref(trait_segment={:?})", trait_segment);
- // The trait reference introduces a binding level here, so
- // we need to shift the `rscope`. It'd be nice if we could
- // do away with this rscope stuff and work this knowledge
- // into resolve_lifetimes, as we do with non-omitted
- // lifetimes. Oh well, not there yet.
- let shifted_rscope = &ShiftedRscope::new(rscope);
-
- let (substs, assoc_bindings) =
- create_substs_for_ast_trait_ref(this,
- shifted_rscope,
+ fn object_path_to_poly_trait_ref(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ trait_def_id: DefId,
+ trait_segment: &hir::PathSegment,
+ mut projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
+ -> ty::PolyTraitRef<'tcx>
+ {
+ self.ast_path_to_poly_trait_ref(rscope,
span,
param_mode,
trait_def_id,
- self_ty,
- trait_segment);
- let poly_trait_ref = ty::Binder(ty::TraitRef::new(trait_def_id, substs));
+ None,
+ trait_segment,
+ projections)
+ }
+ fn ast_path_to_poly_trait_ref(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ trait_def_id: DefId,
+ self_ty: Option<Ty<'tcx>>,
+ trait_segment: &hir::PathSegment,
+ poly_projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
+ -> ty::PolyTraitRef<'tcx>
{
- let converted_bindings =
- assoc_bindings
- .iter()
- .filter_map(|binding| {
- // specify type to assert that error was already reported in Err case:
- let predicate: Result<_, ErrorReported> =
- ast_type_binding_to_poly_projection_predicate(this,
- poly_trait_ref.clone(),
- self_ty,
- binding);
- predicate.ok() // ok to ignore Err() because ErrorReported (see above)
- });
- poly_projections.extend(converted_bindings);
+ debug!("ast_path_to_poly_trait_ref(trait_segment={:?})", trait_segment);
+ // The trait reference introduces a binding level here, so
+ // we need to shift the `rscope`. It'd be nice if we could
+ // do away with this rscope stuff and work this knowledge
+ // into resolve_lifetimes, as we do with non-omitted
+ // lifetimes. Oh well, not there yet.
+ let shifted_rscope = &ShiftedRscope::new(rscope);
+
+ let (substs, assoc_bindings) =
+ self.create_substs_for_ast_trait_ref(shifted_rscope,
+ span,
+ param_mode,
+ trait_def_id,
+ self_ty,
+ trait_segment);
+ let poly_trait_ref = ty::Binder(ty::TraitRef::new(trait_def_id, substs));
+
+ {
+ let converted_bindings =
+ assoc_bindings
+ .iter()
+ .filter_map(|binding| {
+ // specify type to assert that error was already reported in Err case:
+ let predicate: Result<_, ErrorReported> =
+ self.ast_type_binding_to_poly_projection_predicate(poly_trait_ref.clone(),
+ self_ty,
+ binding);
+ predicate.ok() // ok to ignore Err() because ErrorReported (see above)
+ });
+ poly_projections.extend(converted_bindings);
+ }
+
+ debug!("ast_path_to_poly_trait_ref(trait_segment={:?}, projections={:?}) -> {:?}",
+ trait_segment, poly_projections, poly_trait_ref);
+ poly_trait_ref
}
- debug!("ast_path_to_poly_trait_ref(trait_segment={:?}, projections={:?}) -> {:?}",
- trait_segment, poly_projections, poly_trait_ref);
- poly_trait_ref
-}
+ fn ast_path_to_mono_trait_ref(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ trait_def_id: DefId,
+ self_ty: Option<Ty<'tcx>>,
+ trait_segment: &hir::PathSegment)
+ -> ty::TraitRef<'tcx>
+ {
+ let (substs, assoc_bindings) =
+ self.create_substs_for_ast_trait_ref(rscope,
+ span,
+ param_mode,
+ trait_def_id,
+ self_ty,
+ trait_segment);
+ assoc_bindings.first().map(|b| self.tcx().prohibit_projection(b.span));
+ ty::TraitRef::new(trait_def_id, substs)
+ }
-fn ast_path_to_mono_trait_ref<'a,'tcx>(this: &AstConv<'tcx>,
+ fn create_substs_for_ast_trait_ref(&self,
rscope: &RegionScope,
span: Span,
param_mode: PathParamMode,
trait_def_id: DefId,
self_ty: Option<Ty<'tcx>>,
trait_segment: &hir::PathSegment)
- -> ty::TraitRef<'tcx>
-{
- let (substs, assoc_bindings) =
- create_substs_for_ast_trait_ref(this,
- rscope,
- span,
- param_mode,
- trait_def_id,
- self_ty,
- trait_segment);
- prohibit_projections(this.tcx(), &assoc_bindings);
- ty::TraitRef::new(trait_def_id, substs)
-}
+ -> (&'tcx Substs<'tcx>, Vec<ConvertedBinding<'tcx>>)
+ {
+ debug!("create_substs_for_ast_trait_ref(trait_segment={:?})",
+ trait_segment);
+
+ let trait_def = match self.get_trait_def(span, trait_def_id) {
+ Ok(trait_def) => trait_def,
+ Err(ErrorReported) => {
+ // No convenient way to recover from a cycle here. Just bail. Sorry!
+ self.tcx().sess.abort_if_errors();
+ bug!("ErrorReported returned, but no errors reports?")
+ }
+ };
-fn create_substs_for_ast_trait_ref<'a,'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- trait_def_id: DefId,
- self_ty: Option<Ty<'tcx>>,
- trait_segment: &hir::PathSegment)
- -> (&'tcx Substs<'tcx>, Vec<ConvertedBinding<'tcx>>)
-{
- debug!("create_substs_for_ast_trait_ref(trait_segment={:?})",
- trait_segment);
-
- let trait_def = match this.get_trait_def(span, trait_def_id) {
- Ok(trait_def) => trait_def,
- Err(ErrorReported) => {
- // No convenient way to recover from a cycle here. Just bail. Sorry!
- this.tcx().sess.abort_if_errors();
- bug!("ErrorReported returned, but no errors reports?")
- }
- };
+ let (regions, types, assoc_bindings) = match trait_segment.parameters {
+ hir::AngleBracketedParameters(ref data) => {
+ // For now, require that parenthetical notation be used
+ // only with `Fn()` etc.
+ if !self.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar {
+ emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic,
+ "unboxed_closures", span, GateIssue::Language,
+ "\
+ the precise format of `Fn`-family traits' \
+ type parameters is subject to change. \
+ Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead");
+ }
- let (regions, types, assoc_bindings) = match trait_segment.parameters {
- hir::AngleBracketedParameters(ref data) => {
- // For now, require that parenthetical notation be used
- // only with `Fn()` etc.
- if !this.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar {
- emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic,
- "unboxed_closures", span, GateIssue::Language,
- "\
- the precise format of `Fn`-family traits' type parameters is \
- subject to change. Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead");
+ self.convert_angle_bracketed_parameters(rscope, span, &trait_def.generics, data)
}
+ hir::ParenthesizedParameters(ref data) => {
+ // For now, require that parenthetical notation be used
+ // only with `Fn()` etc.
+ if !self.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar {
+ emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic,
+ "unboxed_closures", span, GateIssue::Language,
+ "\
+ parenthetical notation is only stable when used with `Fn`-family traits");
+ }
- convert_angle_bracketed_parameters(this, rscope, span, &trait_def.generics, data)
- }
- hir::ParenthesizedParameters(ref data) => {
- // For now, require that parenthetical notation be used
- // only with `Fn()` etc.
- if !this.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar {
- emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic,
- "unboxed_closures", span, GateIssue::Language,
- "\
- parenthetical notation is only stable when used with `Fn`-family traits");
+ self.convert_parenthesized_parameters(rscope, span, &trait_def.generics, data)
}
+ };
- convert_parenthesized_parameters(this, rscope, span, &trait_def.generics, data)
- }
- };
+ let substs = self.create_substs_for_ast_path(span,
+ param_mode,
+ &trait_def.generics,
+ self_ty,
+ types,
+ regions);
+
+ (self.tcx().mk_substs(substs), assoc_bindings)
+ }
- let substs = create_substs_for_ast_path(this,
- span,
- param_mode,
- &trait_def.generics,
- self_ty,
- types,
- regions);
+ fn ast_type_binding_to_poly_projection_predicate(&self,
+ mut trait_ref: ty::PolyTraitRef<'tcx>,
+ self_ty: Option<Ty<'tcx>>,
+ binding: &ConvertedBinding<'tcx>)
+ -> Result<ty::PolyProjectionPredicate<'tcx>, ErrorReported>
+ {
+ let tcx = self.tcx();
+
+ // Given something like `U : SomeTrait<T=X>`, we want to produce a
+ // predicate like `<U as SomeTrait>::T = X`. This is somewhat
+ // subtle in the event that `T` is defined in a supertrait of
+ // `SomeTrait`, because in that case we need to upcast.
+ //
+ // That is, consider this case:
+ //
+ // ```
+ // trait SubTrait : SuperTrait<int> { }
+ // trait SuperTrait<A> { type T; }
+ //
+ // ... B : SubTrait<T=foo> ...
+ // ```
+ //
+ // We want to produce `<B as SuperTrait<int>>::T == foo`.
+
+ // Simple case: X is defined in the current trait.
+ if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) {
+ return Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------+
+ projection_ty: ty::ProjectionTy { // |
+ trait_ref: trait_ref.skip_binder().clone(), // Binder moved here --+
+ item_name: binding.item_name,
+ },
+ ty: binding.ty,
+ }));
+ }
+
+ // Otherwise, we have to walk through the supertraits to find
+ // those that do. This is complicated by the fact that, for an
+ // object type, the `Self` type is not present in the
+ // substitutions (after all, it's being constructed right now),
+ // but the `supertraits` iterator really wants one. To handle
+ // this, we currently insert a dummy type and then remove it
+ // later. Yuck.
+
+ let dummy_self_ty = tcx.mk_infer(ty::FreshTy(0));
+ if self_ty.is_none() { // if converting for an object type
+ let mut dummy_substs = trait_ref.skip_binder().substs.clone(); // binder moved here -+
+ assert!(dummy_substs.self_ty().is_none()); // |
+ dummy_substs.types.push(SelfSpace, dummy_self_ty); // |
+ trait_ref = ty::Binder(ty::TraitRef::new(trait_ref.def_id(), // <------------+
+ tcx.mk_substs(dummy_substs)));
+ }
+
+ self.ensure_super_predicates(binding.span, trait_ref.def_id())?;
+
+ let mut candidates: Vec<ty::PolyTraitRef> =
+ traits::supertraits(tcx, trait_ref.clone())
+ .filter(|r| self.trait_defines_associated_type_named(r.def_id(), binding.item_name))
+ .collect();
+
+ // If converting for an object type, then remove the dummy-ty from `Self` now.
+ // Yuckety yuck.
+ if self_ty.is_none() {
+ for candidate in &mut candidates {
+ let mut dummy_substs = candidate.0.substs.clone();
+ assert!(dummy_substs.self_ty() == Some(dummy_self_ty));
+ dummy_substs.types.pop(SelfSpace);
+ *candidate = ty::Binder(ty::TraitRef::new(candidate.def_id(),
+ tcx.mk_substs(dummy_substs)));
+ }
+ }
- (this.tcx().mk_substs(substs), assoc_bindings)
-}
+ let candidate = self.one_bound_for_assoc_type(candidates,
+ &trait_ref.to_string(),
+ &binding.item_name.as_str(),
+ binding.span)?;
-fn ast_type_binding_to_poly_projection_predicate<'tcx>(
- this: &AstConv<'tcx>,
- mut trait_ref: ty::PolyTraitRef<'tcx>,
- self_ty: Option<Ty<'tcx>>,
- binding: &ConvertedBinding<'tcx>)
- -> Result<ty::PolyProjectionPredicate<'tcx>, ErrorReported>
-{
- let tcx = this.tcx();
-
- // Given something like `U : SomeTrait<T=X>`, we want to produce a
- // predicate like `<U as SomeTrait>::T = X`. This is somewhat
- // subtle in the event that `T` is defined in a supertrait of
- // `SomeTrait`, because in that case we need to upcast.
- //
- // That is, consider this case:
- //
- // ```
- // trait SubTrait : SuperTrait<int> { }
- // trait SuperTrait<A> { type T; }
- //
- // ... B : SubTrait<T=foo> ...
- // ```
- //
- // We want to produce `<B as SuperTrait<int>>::T == foo`.
-
- // Simple case: X is defined in the current trait.
- if this.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) {
- return Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------+
- projection_ty: ty::ProjectionTy { // |
- trait_ref: trait_ref.skip_binder().clone(), // Binder moved here --+
+ Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------------+
+ projection_ty: ty::ProjectionTy { // |
+ trait_ref: candidate.skip_binder().clone(), // binder is moved up here --+
item_name: binding.item_name,
},
ty: binding.ty,
- }));
+ }))
}
- // Otherwise, we have to walk through the supertraits to find
- // those that do. This is complicated by the fact that, for an
- // object type, the `Self` type is not present in the
- // substitutions (after all, it's being constructed right now),
- // but the `supertraits` iterator really wants one. To handle
- // this, we currently insert a dummy type and then remove it
- // later. Yuck.
-
- let dummy_self_ty = tcx.mk_infer(ty::FreshTy(0));
- if self_ty.is_none() { // if converting for an object type
- let mut dummy_substs = trait_ref.skip_binder().substs.clone(); // binder moved here -+
- assert!(dummy_substs.self_ty().is_none()); // |
- dummy_substs.types.push(SelfSpace, dummy_self_ty); // |
- trait_ref = ty::Binder(ty::TraitRef::new(trait_ref.def_id(), // <------------+
- tcx.mk_substs(dummy_substs)));
- }
+ fn ast_path_to_ty(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ did: DefId,
+ item_segment: &hir::PathSegment)
+ -> Ty<'tcx>
+ {
+ let tcx = self.tcx();
+ let (generics, decl_ty) = match self.get_item_type_scheme(span, did) {
+ Ok(ty::TypeScheme { generics, ty: decl_ty }) => {
+ (generics, decl_ty)
+ }
+ Err(ErrorReported) => {
+ return tcx.types.err;
+ }
+ };
- this.ensure_super_predicates(binding.span, trait_ref.def_id())?;
-
- let mut candidates: Vec<ty::PolyTraitRef> =
- traits::supertraits(tcx, trait_ref.clone())
- .filter(|r| this.trait_defines_associated_type_named(r.def_id(), binding.item_name))
- .collect();
-
- // If converting for an object type, then remove the dummy-ty from `Self` now.
- // Yuckety yuck.
- if self_ty.is_none() {
- for candidate in &mut candidates {
- let mut dummy_substs = candidate.0.substs.clone();
- assert!(dummy_substs.self_ty() == Some(dummy_self_ty));
- dummy_substs.types.pop(SelfSpace);
- *candidate = ty::Binder(ty::TraitRef::new(candidate.def_id(),
- tcx.mk_substs(dummy_substs)));
+ let substs = self.ast_path_substs_for_ty(rscope,
+ span,
+ param_mode,
+ &generics,
+ item_segment);
+
+ // FIXME(#12938): This is a hack until we have full support for DST.
+ if Some(did) == self.tcx().lang_items.owned_box() {
+ assert_eq!(substs.types.len(TypeSpace), 1);
+ return self.tcx().mk_box(*substs.types.get(TypeSpace, 0));
}
+
+ decl_ty.subst(self.tcx(), &substs)
}
- let candidate = one_bound_for_assoc_type(tcx,
- candidates,
- &trait_ref.to_string(),
- &binding.item_name.as_str(),
- binding.span)?;
-
- Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------------+
- projection_ty: ty::ProjectionTy { // |
- trait_ref: candidate.skip_binder().clone(), // binder is moved up here --+
- item_name: binding.item_name,
- },
- ty: binding.ty,
- }))
-}
+ fn ast_ty_to_trait_ref(&self,
+ rscope: &RegionScope,
+ ty: &hir::Ty,
+ bounds: &[hir::TyParamBound])
+ -> Result<TraitAndProjections<'tcx>, ErrorReported>
+ {
+ /*!
+ * In a type like `Foo + Send`, we want to wait to collect the
+ * full set of bounds before we make the object type, because we
+ * need them to infer a region bound. (For example, if we tried
+ * made a type from just `Foo`, then it wouldn't be enough to
+ * infer a 'static bound, and hence the user would get an error.)
+ * So this function is used when we're dealing with a sum type to
+ * convert the LHS. It only accepts a type that refers to a trait
+ * name, and reports an error otherwise.
+ */
+
+ match ty.node {
+ hir::TyPath(None, ref path) => {
+ let def = match self.tcx().def_map.borrow().get(&ty.id) {
+ Some(&def::PathResolution { base_def, depth: 0, .. }) => Some(base_def),
+ _ => None
+ };
+ match def {
+ Some(Def::Trait(trait_def_id)) => {
+ let mut projection_bounds = Vec::new();
+ let trait_ref =
+ self.object_path_to_poly_trait_ref(rscope,
+ path.span,
+ PathParamMode::Explicit,
+ trait_def_id,
+ path.segments.last().unwrap(),
+ &mut projection_bounds);
+ Ok((trait_ref, projection_bounds))
+ }
+ _ => {
+ span_err!(self.tcx().sess, ty.span, E0172,
+ "expected a reference to a trait");
+ Err(ErrorReported)
+ }
+ }
+ }
+ _ => {
+ let mut err = struct_span_err!(self.tcx().sess, ty.span, E0178,
+ "expected a path on the left-hand side \
+ of `+`, not `{}`",
+ pprust::ty_to_string(ty));
+ let hi = bounds.iter().map(|x| match *x {
+ hir::TraitTyParamBound(ref tr, _) => tr.span.hi,
+ hir::RegionTyParamBound(ref r) => r.span.hi,
+ }).max_by_key(|x| x.to_usize());
+ let full_span = hi.map(|hi| Span {
+ lo: ty.span.lo,
+ hi: hi,
+ expn_id: ty.span.expn_id,
+ });
+ match (&ty.node, full_span) {
+ (&hir::TyRptr(None, ref mut_ty), Some(full_span)) => {
+ let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" };
+ err.span_suggestion(full_span, "try adding parentheses (per RFC 438):",
+ format!("&{}({} +{})",
+ mutbl_str,
+ pprust::ty_to_string(&mut_ty.ty),
+ pprust::bounds_to_string(bounds)));
+ }
+ (&hir::TyRptr(Some(ref lt), ref mut_ty), Some(full_span)) => {
+ let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" };
+ err.span_suggestion(full_span, "try adding parentheses (per RFC 438):",
+ format!("&{} {}({} +{})",
+ pprust::lifetime_to_string(lt),
+ mutbl_str,
+ pprust::ty_to_string(&mut_ty.ty),
+ pprust::bounds_to_string(bounds)));
+ }
-fn ast_path_to_ty<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- did: DefId,
- item_segment: &hir::PathSegment)
- -> Ty<'tcx>
-{
- let tcx = this.tcx();
- let (generics, decl_ty) = match this.get_item_type_scheme(span, did) {
- Ok(ty::TypeScheme { generics, ty: decl_ty }) => {
- (generics, decl_ty)
- }
- Err(ErrorReported) => {
- return tcx.types.err;
+ _ => {
+ help!(&mut err,
+ "perhaps you forgot parentheses? (per RFC 438)");
+ }
+ }
+ err.emit();
+ Err(ErrorReported)
+ }
}
- };
+ }
- let substs = ast_path_substs_for_ty(this,
- rscope,
- span,
- param_mode,
- &generics,
- item_segment);
+ fn trait_ref_to_object_type(&self,
+ rscope: &RegionScope,
+ span: Span,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
+ bounds: &[hir::TyParamBound])
+ -> Ty<'tcx>
+ {
+ let existential_bounds = self.conv_existential_bounds(rscope,
+ span,
+ trait_ref.clone(),
+ projection_bounds,
+ bounds);
- // FIXME(#12938): This is a hack until we have full support for DST.
- if Some(did) == this.tcx().lang_items.owned_box() {
- assert_eq!(substs.types.len(TypeSpace), 1);
- return this.tcx().mk_box(*substs.types.get(TypeSpace, 0));
+ let result = self.make_object_type(span, trait_ref, existential_bounds);
+ debug!("trait_ref_to_object_type: result={:?}",
+ result);
+
+ result
}
- decl_ty.subst(this.tcx(), &substs)
-}
-
-type TraitAndProjections<'tcx> = (ty::PolyTraitRef<'tcx>, Vec<ty::PolyProjectionPredicate<'tcx>>);
+ fn make_object_type(&self,
+ span: Span,
+ principal: ty::PolyTraitRef<'tcx>,
+ bounds: ty::ExistentialBounds<'tcx>)
+ -> Ty<'tcx> {
+ let tcx = self.tcx();
+ let object = ty::TraitTy {
+ principal: principal,
+ bounds: bounds
+ };
+ let object_trait_ref =
+ object.principal_trait_ref_with_self_ty(tcx, tcx.types.err);
-fn ast_ty_to_trait_ref<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- ty: &hir::Ty,
- bounds: &[hir::TyParamBound])
- -> Result<TraitAndProjections<'tcx>, ErrorReported>
-{
- /*!
- * In a type like `Foo + Send`, we want to wait to collect the
- * full set of bounds before we make the object type, because we
- * need them to infer a region bound. (For example, if we tried
- * made a type from just `Foo`, then it wouldn't be enough to
- * infer a 'static bound, and hence the user would get an error.)
- * So this function is used when we're dealing with a sum type to
- * convert the LHS. It only accepts a type that refers to a trait
- * name, and reports an error otherwise.
- */
-
- match ty.node {
- hir::TyPath(None, ref path) => {
- let def = match this.tcx().def_map.borrow().get(&ty.id) {
- Some(&def::PathResolution { base_def, depth: 0, .. }) => Some(base_def),
- _ => None
- };
- match def {
- Some(Def::Trait(trait_def_id)) => {
- let mut projection_bounds = Vec::new();
- let trait_ref = object_path_to_poly_trait_ref(this,
- rscope,
- path.span,
- PathParamMode::Explicit,
- trait_def_id,
- path.segments.last().unwrap(),
- &mut projection_bounds);
- Ok((trait_ref, projection_bounds))
- }
- _ => {
- span_err!(this.tcx().sess, ty.span, E0172, "expected a reference to a trait");
- Err(ErrorReported)
- }
- }
+ // ensure the super predicates and stop if we encountered an error
+ if self.ensure_super_predicates(span, principal.def_id()).is_err() {
+ return tcx.types.err;
}
- _ => {
- let mut err = struct_span_err!(this.tcx().sess, ty.span, E0178,
- "expected a path on the left-hand side of `+`, not `{}`",
- pprust::ty_to_string(ty));
- let hi = bounds.iter().map(|x| match *x {
- hir::TraitTyParamBound(ref tr, _) => tr.span.hi,
- hir::RegionTyParamBound(ref r) => r.span.hi,
- }).max_by_key(|x| x.to_usize());
- let full_span = hi.map(|hi| Span {
- lo: ty.span.lo,
- hi: hi,
- expn_id: ty.span.expn_id,
- });
- match (&ty.node, full_span) {
- (&hir::TyRptr(None, ref mut_ty), Some(full_span)) => {
- let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" };
- err.span_suggestion(full_span, "try adding parentheses (per RFC 438):",
- format!("&{}({} +{})",
- mutbl_str,
- pprust::ty_to_string(&mut_ty.ty),
- pprust::bounds_to_string(bounds)));
- }
- (&hir::TyRptr(Some(ref lt), ref mut_ty), Some(full_span)) => {
- let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" };
- err.span_suggestion(full_span, "try adding parentheses (per RFC 438):",
- format!("&{} {}({} +{})",
- pprust::lifetime_to_string(lt),
- mutbl_str,
- pprust::ty_to_string(&mut_ty.ty),
- pprust::bounds_to_string(bounds)));
- }
- _ => {
- help!(&mut err,
- "perhaps you forgot parentheses? (per RFC 438)");
- }
- }
- err.emit();
- Err(ErrorReported)
+ // check that there are no gross object safety violations,
+ // most importantly, that the supertraits don't contain Self,
+ // to avoid ICE-s.
+ let object_safety_violations =
+ tcx.astconv_object_safety_violations(principal.def_id());
+ if !object_safety_violations.is_empty() {
+ tcx.report_object_safety_error(
+ span, principal.def_id(), None, object_safety_violations)
+ .unwrap().emit();
+ return tcx.types.err;
}
- }
-}
-fn trait_ref_to_object_type<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- trait_ref: ty::PolyTraitRef<'tcx>,
- projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
- bounds: &[hir::TyParamBound])
- -> Ty<'tcx>
-{
- let existential_bounds = conv_existential_bounds(this,
- rscope,
- span,
- trait_ref.clone(),
- projection_bounds,
- bounds);
-
- let result = make_object_type(this, span, trait_ref, existential_bounds);
- debug!("trait_ref_to_object_type: result={:?}",
- result);
-
- result
-}
-
-fn make_object_type<'tcx>(this: &AstConv<'tcx>,
- span: Span,
- principal: ty::PolyTraitRef<'tcx>,
- bounds: ty::ExistentialBounds<'tcx>)
- -> Ty<'tcx> {
- let tcx = this.tcx();
- let object = ty::TraitTy {
- principal: principal,
- bounds: bounds
- };
- let object_trait_ref =
- object.principal_trait_ref_with_self_ty(tcx, tcx.types.err);
+ let mut associated_types: FnvHashSet<(DefId, ast::Name)> =
+ traits::supertraits(tcx, object_trait_ref)
+ .flat_map(|tr| {
+ let trait_def = tcx.lookup_trait_def(tr.def_id());
+ trait_def.associated_type_names
+ .clone()
+ .into_iter()
+ .map(move |associated_type_name| (tr.def_id(), associated_type_name))
+ })
+ .collect();
- // ensure the super predicates and stop if we encountered an error
- if this.ensure_super_predicates(span, principal.def_id()).is_err() {
- return tcx.types.err;
- }
+ for projection_bound in &object.bounds.projection_bounds {
+ let pair = (projection_bound.0.projection_ty.trait_ref.def_id,
+ projection_bound.0.projection_ty.item_name);
+ associated_types.remove(&pair);
+ }
- // check that there are no gross object safety violations,
- // most importantly, that the supertraits don't contain Self,
- // to avoid ICE-s.
- let object_safety_violations =
- traits::astconv_object_safety_violations(tcx, principal.def_id());
- if !object_safety_violations.is_empty() {
- traits::report_object_safety_error(
- tcx, span, principal.def_id(), None, object_safety_violations)
- .unwrap().emit();
- return tcx.types.err;
- }
+ for (trait_def_id, name) in associated_types {
+ span_err!(tcx.sess, span, E0191,
+ "the value of the associated type `{}` (from the trait `{}`) must be specified",
+ name,
+ tcx.item_path_str(trait_def_id));
+ }
- let mut associated_types: FnvHashSet<(DefId, ast::Name)> =
- traits::supertraits(tcx, object_trait_ref)
- .flat_map(|tr| {
- let trait_def = tcx.lookup_trait_def(tr.def_id());
- trait_def.associated_type_names
- .clone()
- .into_iter()
- .map(move |associated_type_name| (tr.def_id(), associated_type_name))
- })
- .collect();
-
- for projection_bound in &object.bounds.projection_bounds {
- let pair = (projection_bound.0.projection_ty.trait_ref.def_id,
- projection_bound.0.projection_ty.item_name);
- associated_types.remove(&pair);
+ tcx.mk_trait(object.principal, object.bounds)
}
- for (trait_def_id, name) in associated_types {
- span_err!(tcx.sess, span, E0191,
- "the value of the associated type `{}` (from the trait `{}`) must be specified",
- name,
- tcx.item_path_str(trait_def_id));
+ fn report_ambiguous_associated_type(&self,
+ span: Span,
+ type_str: &str,
+ trait_str: &str,
+ name: &str) {
+ span_err!(self.tcx().sess, span, E0223,
+ "ambiguous associated type; specify the type using the syntax \
+ `<{} as {}>::{}`",
+ type_str, trait_str, name);
}
- tcx.mk_trait(object.principal, object.bounds)
-}
-
-fn report_ambiguous_associated_type(tcx: &TyCtxt,
- span: Span,
- type_str: &str,
- trait_str: &str,
- name: &str) {
- span_err!(tcx.sess, span, E0223,
- "ambiguous associated type; specify the type using the syntax \
- `<{} as {}>::{}`",
- type_str, trait_str, name);
-}
+ // Search for a bound on a type parameter which includes the associated item
+ // given by assoc_name. ty_param_node_id is the node id for the type parameter
+ // (which might be `Self`, but only if it is the `Self` of a trait, not an
+ // impl). This function will fail if there are no suitable bounds or there is
+ // any ambiguity.
+ fn find_bound_for_assoc_item(&self,
+ ty_param_node_id: ast::NodeId,
+ ty_param_name: ast::Name,
+ assoc_name: ast::Name,
+ span: Span)
+ -> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
+ {
+ let tcx = self.tcx();
-// Search for a bound on a type parameter which includes the associated item
-// given by assoc_name. ty_param_node_id is the node id for the type parameter
-// (which might be `Self`, but only if it is the `Self` of a trait, not an
-// impl). This function will fail if there are no suitable bounds or there is
-// any ambiguity.
-fn find_bound_for_assoc_item<'tcx>(this: &AstConv<'tcx>,
- ty_param_node_id: ast::NodeId,
- ty_param_name: ast::Name,
- assoc_name: ast::Name,
- span: Span)
- -> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
-{
- let tcx = this.tcx();
+ let bounds = match self.get_type_parameter_bounds(span, ty_param_node_id) {
+ Ok(v) => v,
+ Err(ErrorReported) => {
+ return Err(ErrorReported);
+ }
+ };
- let bounds = match this.get_type_parameter_bounds(span, ty_param_node_id) {
- Ok(v) => v,
- Err(ErrorReported) => {
+ // Ensure the super predicates and stop if we encountered an error.
+ if bounds.iter().any(|b| self.ensure_super_predicates(span, b.def_id()).is_err()) {
return Err(ErrorReported);
}
- };
- // Ensure the super predicates and stop if we encountered an error.
- if bounds.iter().any(|b| this.ensure_super_predicates(span, b.def_id()).is_err()) {
- return Err(ErrorReported);
+ // Check that there is exactly one way to find an associated type with the
+ // correct name.
+ let suitable_bounds: Vec<_> =
+ traits::transitive_bounds(tcx, &bounds)
+ .filter(|b| self.trait_defines_associated_type_named(b.def_id(), assoc_name))
+ .collect();
+
+ self.one_bound_for_assoc_type(suitable_bounds,
+ &ty_param_name.as_str(),
+ &assoc_name.as_str(),
+ span)
}
- // Check that there is exactly one way to find an associated type with the
- // correct name.
- let suitable_bounds: Vec<_> =
- traits::transitive_bounds(tcx, &bounds)
- .filter(|b| this.trait_defines_associated_type_named(b.def_id(), assoc_name))
- .collect();
-
- one_bound_for_assoc_type(tcx,
- suitable_bounds,
- &ty_param_name.as_str(),
- &assoc_name.as_str(),
- span)
-}
+ // Checks that bounds contains exactly one element and reports appropriate
+ // errors otherwise.
+ fn one_bound_for_assoc_type(&self,
+ bounds: Vec<ty::PolyTraitRef<'tcx>>,
+ ty_param_name: &str,
+ assoc_name: &str,
+ span: Span)
+ -> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
+ {
+ if bounds.is_empty() {
+ span_err!(self.tcx().sess, span, E0220,
+ "associated type `{}` not found for `{}`",
+ assoc_name,
+ ty_param_name);
+ return Err(ErrorReported);
+ }
-// Checks that bounds contains exactly one element and reports appropriate
-// errors otherwise.
-fn one_bound_for_assoc_type<'tcx>(tcx: &TyCtxt<'tcx>,
- bounds: Vec<ty::PolyTraitRef<'tcx>>,
- ty_param_name: &str,
- assoc_name: &str,
- span: Span)
- -> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
-{
- if bounds.is_empty() {
- span_err!(tcx.sess, span, E0220,
- "associated type `{}` not found for `{}`",
- assoc_name,
- ty_param_name);
- return Err(ErrorReported);
- }
+ if bounds.len() > 1 {
+ let mut err = struct_span_err!(self.tcx().sess, span, E0221,
+ "ambiguous associated type `{}` in bounds of `{}`",
+ assoc_name,
+ ty_param_name);
- if bounds.len() > 1 {
- let mut err = struct_span_err!(tcx.sess, span, E0221,
- "ambiguous associated type `{}` in bounds of `{}`",
- assoc_name,
- ty_param_name);
-
- for bound in &bounds {
- span_note!(&mut err, span,
- "associated type `{}` could derive from `{}`",
- ty_param_name,
- bound);
+ for bound in &bounds {
+ span_note!(&mut err, span,
+ "associated type `{}` could derive from `{}`",
+ ty_param_name,
+ bound);
+ }
+ err.emit();
}
- err.emit();
+
+ Ok(bounds[0].clone())
}
- Ok(bounds[0].clone())
-}
+ // Create a type from a path to an associated type.
+ // For a path A::B::C::D, ty and ty_path_def are the type and def for A::B::C
+ // and item_segment is the path segment for D. We return a type and a def for
+ // the whole path.
+ // Will fail except for T::A and Self::A; i.e., if ty/ty_path_def are not a type
+ // parameter or Self.
+ fn associated_path_def_to_ty(&self,
+ span: Span,
+ ty: Ty<'tcx>,
+ ty_path_def: Def,
+ item_segment: &hir::PathSegment)
+ -> (Ty<'tcx>, Def)
+ {
+ let tcx = self.tcx();
+ let assoc_name = item_segment.identifier.name;
+
+ debug!("associated_path_def_to_ty: {:?}::{}", ty, assoc_name);
+
+ tcx.prohibit_type_params(slice::ref_slice(item_segment));
+
+ // Find the type of the associated item, and the trait where the associated
+ // item is declared.
+ let bound = match (&ty.sty, ty_path_def) {
+ (_, Def::SelfTy(Some(trait_did), Some(impl_id))) => {
+ // `Self` in an impl of a trait - we have a concrete self type and a
+ // trait reference.
+ let trait_ref = tcx.impl_trait_ref(tcx.map.local_def_id(impl_id)).unwrap();
+ let trait_ref = if let Some(free_substs) = self.get_free_substs() {
+ trait_ref.subst(tcx, free_substs)
+ } else {
+ trait_ref
+ };
-// Create a type from a path to an associated type.
-// For a path A::B::C::D, ty and ty_path_def are the type and def for A::B::C
-// and item_segment is the path segment for D. We return a type and a def for
-// the whole path.
-// Will fail except for T::A and Self::A; i.e., if ty/ty_path_def are not a type
-// parameter or Self.
-fn associated_path_def_to_ty<'tcx>(this: &AstConv<'tcx>,
- span: Span,
- ty: Ty<'tcx>,
- ty_path_def: Def,
- item_segment: &hir::PathSegment)
- -> (Ty<'tcx>, Def)
-{
- let tcx = this.tcx();
- let assoc_name = item_segment.identifier.name;
-
- debug!("associated_path_def_to_ty: {:?}::{}", ty, assoc_name);
-
- prohibit_type_params(tcx, slice::ref_slice(item_segment));
-
- // Find the type of the associated item, and the trait where the associated
- // item is declared.
- let bound = match (&ty.sty, ty_path_def) {
- (_, Def::SelfTy(Some(trait_did), Some((impl_id, _)))) => {
- // `Self` in an impl of a trait - we have a concrete self type and a
- // trait reference.
- let trait_ref = tcx.impl_trait_ref(tcx.map.local_def_id(impl_id)).unwrap();
- let trait_ref = if let Some(free_substs) = this.get_free_substs() {
- trait_ref.subst(tcx, free_substs)
- } else {
- trait_ref
- };
+ if self.ensure_super_predicates(span, trait_did).is_err() {
+ return (tcx.types.err, ty_path_def);
+ }
- if this.ensure_super_predicates(span, trait_did).is_err() {
- return (tcx.types.err, ty_path_def);
+ let candidates: Vec<ty::PolyTraitRef> =
+ traits::supertraits(tcx, ty::Binder(trait_ref))
+ .filter(|r| self.trait_defines_associated_type_named(r.def_id(),
+ assoc_name))
+ .collect();
+
+ match self.one_bound_for_assoc_type(candidates,
+ "Self",
+ &assoc_name.as_str(),
+ span) {
+ Ok(bound) => bound,
+ Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ }
}
-
- let candidates: Vec<ty::PolyTraitRef> =
- traits::supertraits(tcx, ty::Binder(trait_ref))
- .filter(|r| this.trait_defines_associated_type_named(r.def_id(),
- assoc_name))
- .collect();
-
- match one_bound_for_assoc_type(tcx,
- candidates,
- "Self",
- &assoc_name.as_str(),
- span) {
- Ok(bound) => bound,
- Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ (&ty::TyParam(_), Def::SelfTy(Some(trait_did), None)) => {
+ let trait_node_id = tcx.map.as_local_node_id(trait_did).unwrap();
+ match self.find_bound_for_assoc_item(trait_node_id,
+ keywords::SelfType.name(),
+ assoc_name,
+ span) {
+ Ok(bound) => bound,
+ Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ }
}
- }
- (&ty::TyParam(_), Def::SelfTy(Some(trait_did), None)) => {
- let trait_node_id = tcx.map.as_local_node_id(trait_did).unwrap();
- match find_bound_for_assoc_item(this,
- trait_node_id,
- keywords::SelfType.name(),
- assoc_name,
- span) {
- Ok(bound) => bound,
- Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ (&ty::TyParam(_), Def::TyParam(_, _, param_did, param_name)) => {
+ let param_node_id = tcx.map.as_local_node_id(param_did).unwrap();
+ match self.find_bound_for_assoc_item(param_node_id,
+ param_name,
+ assoc_name,
+ span) {
+ Ok(bound) => bound,
+ Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ }
}
- }
- (&ty::TyParam(_), Def::TyParam(_, _, param_did, param_name)) => {
- let param_node_id = tcx.map.as_local_node_id(param_did).unwrap();
- match find_bound_for_assoc_item(this,
- param_node_id,
- param_name,
- assoc_name,
- span) {
- Ok(bound) => bound,
- Err(ErrorReported) => return (tcx.types.err, ty_path_def),
+ _ => {
+ self.report_ambiguous_associated_type(span,
+ &ty.to_string(),
+ "Trait",
+ &assoc_name.as_str());
+ return (tcx.types.err, ty_path_def);
}
- }
- _ => {
- report_ambiguous_associated_type(tcx,
- span,
- &ty.to_string(),
- "Trait",
- &assoc_name.as_str());
- return (tcx.types.err, ty_path_def);
- }
- };
+ };
- let trait_did = bound.0.def_id;
- let ty = this.projected_ty_from_poly_trait_ref(span, bound, assoc_name);
-
- let item_did = if let Some(trait_id) = tcx.map.as_local_node_id(trait_did) {
- // `ty::trait_items` used below requires information generated
- // by type collection, which may be in progress at this point.
- match tcx.map.expect_item(trait_id).node {
- hir::ItemTrait(_, _, _, ref trait_items) => {
- let item = trait_items.iter()
- .find(|i| i.name == assoc_name)
- .expect("missing associated type");
- tcx.map.local_def_id(item.id)
+ let trait_did = bound.0.def_id;
+ let ty = self.projected_ty_from_poly_trait_ref(span, bound, assoc_name);
+
+ let item_did = if let Some(trait_id) = tcx.map.as_local_node_id(trait_did) {
+ // `ty::trait_items` used below requires information generated
+ // by type collection, which may be in progress at this point.
+ match tcx.map.expect_item(trait_id).node {
+ hir::ItemTrait(_, _, _, ref trait_items) => {
+ let item = trait_items.iter()
+ .find(|i| i.name == assoc_name)
+ .expect("missing associated type");
+ tcx.map.local_def_id(item.id)
+ }
+ _ => bug!()
}
- _ => bug!()
- }
- } else {
- let trait_items = tcx.trait_items(trait_did);
- let item = trait_items.iter().find(|i| i.name() == assoc_name);
- item.expect("missing associated type").def_id()
- };
+ } else {
+ let trait_items = tcx.trait_items(trait_did);
+ let item = trait_items.iter().find(|i| i.name() == assoc_name);
+ item.expect("missing associated type").def_id()
+ };
- (ty, Def::AssociatedTy(trait_did, item_did))
-}
+ (ty, Def::AssociatedTy(trait_did, item_did))
+ }
-fn qpath_to_ty<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- opt_self_ty: Option<Ty<'tcx>>,
- trait_def_id: DefId,
- trait_segment: &hir::PathSegment,
- item_segment: &hir::PathSegment)
- -> Ty<'tcx>
-{
- let tcx = this.tcx();
+ fn qpath_to_ty(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ opt_self_ty: Option<Ty<'tcx>>,
+ trait_def_id: DefId,
+ trait_segment: &hir::PathSegment,
+ item_segment: &hir::PathSegment)
+ -> Ty<'tcx>
+ {
+ let tcx = self.tcx();
- prohibit_type_params(tcx, slice::ref_slice(item_segment));
+ tcx.prohibit_type_params(slice::ref_slice(item_segment));
- let self_ty = if let Some(ty) = opt_self_ty {
- ty
- } else {
- let path_str = tcx.item_path_str(trait_def_id);
- report_ambiguous_associated_type(tcx,
- span,
- "Type",
- &path_str,
- &item_segment.identifier.name.as_str());
- return tcx.types.err;
- };
+ let self_ty = if let Some(ty) = opt_self_ty {
+ ty
+ } else {
+ let path_str = tcx.item_path_str(trait_def_id);
+ self.report_ambiguous_associated_type(span,
+ "Type",
+ &path_str,
+ &item_segment.identifier.name.as_str());
+ return tcx.types.err;
+ };
- debug!("qpath_to_ty: self_type={:?}", self_ty);
+ debug!("qpath_to_ty: self_type={:?}", self_ty);
- let trait_ref = ast_path_to_mono_trait_ref(this,
- rscope,
- span,
- param_mode,
- trait_def_id,
- Some(self_ty),
- trait_segment);
+ let trait_ref = self.ast_path_to_mono_trait_ref(rscope,
+ span,
+ param_mode,
+ trait_def_id,
+ Some(self_ty),
+ trait_segment);
- debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
+ debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
- this.projected_ty(span, trait_ref, item_segment.identifier.name)
-}
+ self.projected_ty(span, trait_ref, item_segment.identifier.name)
+ }
-/// Convert a type supplied as value for a type argument from AST into our
-/// our internal representation. This is the same as `ast_ty_to_ty` but that
-/// it applies the object lifetime default.
-///
-/// # Parameters
-///
-/// * `this`, `rscope`: the surrounding context
-/// * `decl_generics`: the generics of the struct/enum/trait declaration being
-/// referenced
-/// * `index`: the index of the type parameter being instantiated from the list
-/// (we assume it is in the `TypeSpace`)
-/// * `region_substs`: a partial substitution consisting of
-/// only the region type parameters being supplied to this type.
-/// * `ast_ty`: the ast representation of the type being supplied
-pub fn ast_ty_arg_to_ty<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- decl_generics: &ty::Generics<'tcx>,
- index: usize,
- region_substs: &Substs<'tcx>,
- ast_ty: &hir::Ty)
- -> Ty<'tcx>
-{
- let tcx = this.tcx();
+    /// Convert a type supplied as value for a type argument from AST into our
+    /// internal representation. This is the same as `ast_ty_to_ty` except that
+    /// it applies the object lifetime default.
+ ///
+ /// # Parameters
+ ///
+    /// * `rscope`: the surrounding context
+ /// * `decl_generics`: the generics of the struct/enum/trait declaration being
+ /// referenced
+ /// * `index`: the index of the type parameter being instantiated from the list
+ /// (we assume it is in the `TypeSpace`)
+ /// * `region_substs`: a partial substitution consisting of
+ /// only the region type parameters being supplied to this type.
+ /// * `ast_ty`: the ast representation of the type being supplied
+ pub fn ast_ty_arg_to_ty(&self,
+ rscope: &RegionScope,
+ decl_generics: &ty::Generics<'tcx>,
+ index: usize,
+ region_substs: &Substs<'tcx>,
+ ast_ty: &hir::Ty)
+ -> Ty<'tcx>
+ {
+ let tcx = self.tcx();
- if let Some(def) = decl_generics.types.opt_get(TypeSpace, index) {
- let object_lifetime_default = def.object_lifetime_default.subst(tcx, region_substs);
- let rscope1 = &ObjectLifetimeDefaultRscope::new(rscope, object_lifetime_default);
- ast_ty_to_ty(this, rscope1, ast_ty)
- } else {
- ast_ty_to_ty(this, rscope, ast_ty)
+ if let Some(def) = decl_generics.types.opt_get(TypeSpace, index) {
+ let object_lifetime_default = def.object_lifetime_default.subst(tcx, region_substs);
+ let rscope1 = &ObjectLifetimeDefaultRscope::new(rscope, object_lifetime_default);
+ self.ast_ty_to_ty(rscope1, ast_ty)
+ } else {
+ self.ast_ty_to_ty(rscope, ast_ty)
+ }
}
-}
-// Check the base def in a PathResolution and convert it to a Ty. If there are
-// associated types in the PathResolution, these will need to be separately
-// resolved.
-fn base_def_to_ty<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- def: &Def,
- opt_self_ty: Option<Ty<'tcx>>,
- base_segments: &[hir::PathSegment])
- -> Ty<'tcx> {
- let tcx = this.tcx();
-
- match *def {
- Def::Trait(trait_def_id) => {
- // N.B. this case overlaps somewhat with
- // TyObjectSum, see that fn for details
- let mut projection_bounds = Vec::new();
-
- let trait_ref = object_path_to_poly_trait_ref(this,
- rscope,
- span,
- param_mode,
- trait_def_id,
- base_segments.last().unwrap(),
- &mut projection_bounds);
-
- prohibit_type_params(tcx, base_segments.split_last().unwrap().1);
- trait_ref_to_object_type(this,
- rscope,
- span,
- trait_ref,
- projection_bounds,
- &[])
- }
- Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => {
- prohibit_type_params(tcx, base_segments.split_last().unwrap().1);
- ast_path_to_ty(this,
- rscope,
- span,
- param_mode,
- did,
- base_segments.last().unwrap())
- }
- Def::TyParam(space, index, _, name) => {
- prohibit_type_params(tcx, base_segments);
- tcx.mk_param(space, index, name)
- }
- Def::SelfTy(_, Some((_, self_ty_id))) => {
- // Self in impl (we know the concrete type).
- prohibit_type_params(tcx, base_segments);
- if let Some(&ty) = tcx.ast_ty_to_ty_cache.borrow().get(&self_ty_id) {
- if let Some(free_substs) = this.get_free_substs() {
+ // Check the base def in a PathResolution and convert it to a Ty. If there are
+ // associated types in the PathResolution, these will need to be separately
+ // resolved.
+ fn base_def_to_ty(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ def: &Def,
+ opt_self_ty: Option<Ty<'tcx>>,
+ base_segments: &[hir::PathSegment])
+ -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ match *def {
+ Def::Trait(trait_def_id) => {
+ // N.B. this case overlaps somewhat with
+ // TyObjectSum, see that fn for details
+ let mut projection_bounds = Vec::new();
+
+ let trait_ref =
+ self.object_path_to_poly_trait_ref(rscope,
+ span,
+ param_mode,
+ trait_def_id,
+ base_segments.last().unwrap(),
+ &mut projection_bounds);
+
+ tcx.prohibit_type_params(base_segments.split_last().unwrap().1);
+ self.trait_ref_to_object_type(rscope,
+ span,
+ trait_ref,
+ projection_bounds,
+ &[])
+ }
+ Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) => {
+ tcx.prohibit_type_params(base_segments.split_last().unwrap().1);
+ self.ast_path_to_ty(rscope,
+ span,
+ param_mode,
+ did,
+ base_segments.last().unwrap())
+ }
+ Def::TyParam(space, index, _, name) => {
+ tcx.prohibit_type_params(base_segments);
+ tcx.mk_param(space, index, name)
+ }
+ Def::SelfTy(_, Some(impl_id)) => {
+ // Self in impl (we know the concrete type).
+ tcx.prohibit_type_params(base_segments);
+ let ty = tcx.node_id_to_type(impl_id);
+ if let Some(free_substs) = self.get_free_substs() {
ty.subst(tcx, free_substs)
} else {
ty
}
- } else {
- span_bug!(span, "self type has not been fully resolved")
}
- }
- Def::SelfTy(Some(_), None) => {
- // Self in trait.
- prohibit_type_params(tcx, base_segments);
- tcx.mk_self_type()
- }
- Def::AssociatedTy(trait_did, _) => {
- prohibit_type_params(tcx, &base_segments[..base_segments.len()-2]);
- qpath_to_ty(this,
- rscope,
- span,
- param_mode,
- opt_self_ty,
- trait_did,
- &base_segments[base_segments.len()-2],
- base_segments.last().unwrap())
- }
- Def::Mod(..) => {
- // Used as sentinel by callers to indicate the `<T>::A::B::C` form.
- // FIXME(#22519) This part of the resolution logic should be
- // avoided entirely for that form, once we stop needed a Def
- // for `associated_path_def_to_ty`.
- // Fixing this will also let use resolve <Self>::Foo the same way we
- // resolve Self::Foo, at the moment we can't resolve the former because
- // we don't have the trait information around, which is just sad.
-
- assert!(base_segments.is_empty());
-
- opt_self_ty.expect("missing T in <T>::a::b::c")
- }
- Def::PrimTy(prim_ty) => {
- prim_ty_to_ty(tcx, base_segments, prim_ty)
- }
- Def::Err => {
- this.set_tainted_by_errors();
- return this.tcx().types.err;
- }
- _ => {
- span_err!(tcx.sess, span, E0248,
- "found value `{}` used as a type",
- tcx.item_path_str(def.def_id()));
- return this.tcx().types.err;
+ Def::SelfTy(Some(_), None) => {
+ // Self in trait.
+ tcx.prohibit_type_params(base_segments);
+ tcx.mk_self_type()
+ }
+ Def::AssociatedTy(trait_did, _) => {
+ tcx.prohibit_type_params(&base_segments[..base_segments.len()-2]);
+ self.qpath_to_ty(rscope,
+ span,
+ param_mode,
+ opt_self_ty,
+ trait_did,
+ &base_segments[base_segments.len()-2],
+ base_segments.last().unwrap())
+ }
+ Def::Mod(..) => {
+ // Used as sentinel by callers to indicate the `<T>::A::B::C` form.
+ // FIXME(#22519) This part of the resolution logic should be
+                // avoided entirely for that form, once we stop needing a Def
+                // for `associated_path_def_to_ty`.
+                // Fixing this will also let us resolve <Self>::Foo the same way we
+ // resolve Self::Foo, at the moment we can't resolve the former because
+ // we don't have the trait information around, which is just sad.
+
+ assert!(base_segments.is_empty());
+
+ opt_self_ty.expect("missing T in <T>::a::b::c")
+ }
+ Def::PrimTy(prim_ty) => {
+ tcx.prim_ty_to_ty(base_segments, prim_ty)
+ }
+ Def::Err => {
+ self.set_tainted_by_errors();
+ return self.tcx().types.err;
+ }
+ _ => {
+ span_err!(tcx.sess, span, E0248,
+ "found value `{}` used as a type",
+ tcx.item_path_str(def.def_id()));
+ return self.tcx().types.err;
+ }
}
}
-}
-// Note that both base_segments and assoc_segments may be empty, although not at
-// the same time.
-pub fn finish_resolving_def_to_ty<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- param_mode: PathParamMode,
- def: &Def,
- opt_self_ty: Option<Ty<'tcx>>,
- base_segments: &[hir::PathSegment],
- assoc_segments: &[hir::PathSegment])
- -> Ty<'tcx> {
- let mut ty = base_def_to_ty(this,
- rscope,
- span,
- param_mode,
- def,
- opt_self_ty,
- base_segments);
- let mut def = *def;
- // If any associated type segments remain, attempt to resolve them.
- for segment in assoc_segments {
- if ty.sty == ty::TyError {
- break;
+ // Note that both base_segments and assoc_segments may be empty, although not at
+ // the same time.
+ pub fn finish_resolving_def_to_ty(&self,
+ rscope: &RegionScope,
+ span: Span,
+ param_mode: PathParamMode,
+ def: &Def,
+ opt_self_ty: Option<Ty<'tcx>>,
+ base_segments: &[hir::PathSegment],
+ assoc_segments: &[hir::PathSegment])
+ -> Ty<'tcx> {
+ let mut ty = self.base_def_to_ty(rscope,
+ span,
+ param_mode,
+ def,
+ opt_self_ty,
+ base_segments);
+ let mut def = *def;
+ // If any associated type segments remain, attempt to resolve them.
+ for segment in assoc_segments {
+ if ty.sty == ty::TyError {
+ break;
+ }
+ // This is pretty bad (it will fail except for T::A and Self::A).
+ let (a_ty, a_def) = self.associated_path_def_to_ty(span,
+ ty,
+ def,
+ segment);
+ ty = a_ty;
+ def = a_def;
}
- // This is pretty bad (it will fail except for T::A and Self::A).
- let (a_ty, a_def) = associated_path_def_to_ty(this,
- span,
- ty,
- def,
- segment);
- ty = a_ty;
- def = a_def;
+ ty
}
- ty
-}
-/// Parses the programmer's textual representation of a type into our
-/// internal notion of a type.
-pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- ast_ty: &hir::Ty)
- -> Ty<'tcx>
-{
- debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})",
- ast_ty.id, ast_ty);
-
- let tcx = this.tcx();
+ /// Parses the programmer's textual representation of a type into our
+ /// internal notion of a type.
+ pub fn ast_ty_to_ty(&self, rscope: &RegionScope, ast_ty: &hir::Ty) -> Ty<'tcx> {
+ debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})",
+ ast_ty.id, ast_ty);
- if let Some(&ty) = tcx.ast_ty_to_ty_cache.borrow().get(&ast_ty.id) {
- debug!("ast_ty_to_ty: id={:?} ty={:?} (cached)", ast_ty.id, ty);
- return ty;
- }
+ let tcx = self.tcx();
- let typ = match ast_ty.node {
- hir::TyVec(ref ty) => {
- tcx.mk_slice(ast_ty_to_ty(this, rscope, &ty))
- }
- hir::TyObjectSum(ref ty, ref bounds) => {
- match ast_ty_to_trait_ref(this, rscope, &ty, bounds) {
- Ok((trait_ref, projection_bounds)) => {
- trait_ref_to_object_type(this,
- rscope,
- ast_ty.span,
- trait_ref,
- projection_bounds,
- bounds)
- }
- Err(ErrorReported) => {
- this.tcx().types.err
- }
+ match ast_ty.node {
+ hir::TyVec(ref ty) => {
+ tcx.mk_slice(self.ast_ty_to_ty(rscope, &ty))
}
- }
- hir::TyPtr(ref mt) => {
- tcx.mk_ptr(ty::TypeAndMut {
- ty: ast_ty_to_ty(this, rscope, &mt.ty),
- mutbl: mt.mutbl
- })
- }
- hir::TyRptr(ref region, ref mt) => {
- let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region);
- debug!("TyRef r={:?}", r);
- let rscope1 =
- &ObjectLifetimeDefaultRscope::new(
- rscope,
- ty::ObjectLifetimeDefault::Specific(r));
- let t = ast_ty_to_ty(this, rscope1, &mt.ty);
- tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
- }
- hir::TyTup(ref fields) => {
- let flds = fields.iter()
- .map(|t| ast_ty_to_ty(this, rscope, &t))
- .collect();
- tcx.mk_tup(flds)
- }
- hir::TyBareFn(ref bf) => {
- require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
- tcx.mk_fn_ptr(ty_of_bare_fn(this, bf.unsafety, bf.abi, &bf.decl))
- }
- hir::TyPolyTraitRef(ref bounds) => {
- conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds)
- }
- hir::TyPath(ref maybe_qself, ref path) => {
- let path_res = if let Some(&d) = tcx.def_map.borrow().get(&ast_ty.id) {
- d
- } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself {
- // Create some fake resolution that can't possibly be a type.
- def::PathResolution {
- base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
- depth: path.segments.len()
+ hir::TyObjectSum(ref ty, ref bounds) => {
+ match self.ast_ty_to_trait_ref(rscope, &ty, bounds) {
+ Ok((trait_ref, projection_bounds)) => {
+ self.trait_ref_to_object_type(rscope,
+ ast_ty.span,
+ trait_ref,
+ projection_bounds,
+ bounds)
+ }
+ Err(ErrorReported) => {
+ self.tcx().types.err
+ }
}
- } else {
- span_bug!(ast_ty.span, "unbound path {:?}", ast_ty)
- };
- let def = path_res.base_def;
- let base_ty_end = path.segments.len() - path_res.depth;
- let opt_self_ty = maybe_qself.as_ref().map(|qself| {
- ast_ty_to_ty(this, rscope, &qself.ty)
- });
- let ty = finish_resolving_def_to_ty(this,
- rscope,
- ast_ty.span,
- PathParamMode::Explicit,
- &def,
- opt_self_ty,
- &path.segments[..base_ty_end],
- &path.segments[base_ty_end..]);
-
- if path_res.depth != 0 && ty.sty != ty::TyError {
- // Write back the new resolution.
- tcx.def_map.borrow_mut().insert(ast_ty.id, def::PathResolution {
- base_def: def,
- depth: 0
- });
}
+ hir::TyPtr(ref mt) => {
+ tcx.mk_ptr(ty::TypeAndMut {
+ ty: self.ast_ty_to_ty(rscope, &mt.ty),
+ mutbl: mt.mutbl
+ })
+ }
+ hir::TyRptr(ref region, ref mt) => {
+ let r = self.opt_ast_region_to_region(rscope, ast_ty.span, region);
+ debug!("TyRef r={:?}", r);
+ let rscope1 =
+ &ObjectLifetimeDefaultRscope::new(
+ rscope,
+ ty::ObjectLifetimeDefault::Specific(r));
+ let t = self.ast_ty_to_ty(rscope1, &mt.ty);
+ tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
+ }
+ hir::TyTup(ref fields) => {
+ let flds = fields.iter()
+ .map(|t| self.ast_ty_to_ty(rscope, &t))
+ .collect();
+ tcx.mk_tup(flds)
+ }
+ hir::TyBareFn(ref bf) => {
+ require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
+ tcx.mk_fn_ptr(self.ty_of_bare_fn(bf.unsafety, bf.abi, &bf.decl))
+ }
+ hir::TyPolyTraitRef(ref bounds) => {
+ self.conv_ty_poly_trait_ref(rscope, ast_ty.span, bounds)
+ }
+ hir::TyPath(ref maybe_qself, ref path) => {
+ let path_res = if let Some(&d) = tcx.def_map.borrow().get(&ast_ty.id) {
+ d
+ } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself {
+ // Create some fake resolution that can't possibly be a type.
+ def::PathResolution {
+ base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
+ depth: path.segments.len()
+ }
+ } else {
+ span_bug!(ast_ty.span, "unbound path {:?}", ast_ty)
+ };
+ let def = path_res.base_def;
+ let base_ty_end = path.segments.len() - path_res.depth;
+ let opt_self_ty = maybe_qself.as_ref().map(|qself| {
+ self.ast_ty_to_ty(rscope, &qself.ty)
+ });
+ let ty = self.finish_resolving_def_to_ty(rscope,
+ ast_ty.span,
+ PathParamMode::Explicit,
+ &def,
+ opt_self_ty,
+ &path.segments[..base_ty_end],
+ &path.segments[base_ty_end..]);
+
+ if path_res.depth != 0 && ty.sty != ty::TyError {
+ // Write back the new resolution.
+ tcx.def_map.borrow_mut().insert(ast_ty.id, def::PathResolution {
+ base_def: def,
+ depth: 0
+ });
+ }
- ty
- }
- hir::TyFixedLengthVec(ref ty, ref e) => {
- let hint = UncheckedExprHint(tcx.types.usize);
- match eval_const_expr_partial(tcx, &e, hint, None) {
- Ok(ConstVal::Integral(ConstInt::Usize(i))) => {
- let i = i.as_u64(tcx.sess.target.uint_type);
- assert_eq!(i as usize as u64, i);
- tcx.mk_array(ast_ty_to_ty(this, rscope, &ty), i as usize)
- },
- Ok(val) => {
- span_err!(tcx.sess, ast_ty.span, E0249,
- "expected usize value for array length, got {}", val.description());
- this.tcx().types.err
- },
- // array length errors happen before the global constant check
- // so we need to report the real error
- Err(ConstEvalErr { kind: ErroneousReferencedConstant(box r), ..}) |
- Err(r) => {
- let mut err = struct_span_err!(tcx.sess, r.span, E0250,
- "array length constant evaluation error: {}",
- r.description());
- if !ast_ty.span.contains(r.span) {
- span_note!(&mut err, ast_ty.span, "for array length here")
+ ty
+ }
+ hir::TyFixedLengthVec(ref ty, ref e) => {
+ let hint = UncheckedExprHint(tcx.types.usize);
+ match eval_const_expr_partial(tcx.global_tcx(), &e, hint, None) {
+ Ok(ConstVal::Integral(ConstInt::Usize(i))) => {
+ let i = i.as_u64(tcx.sess.target.uint_type);
+ assert_eq!(i as usize as u64, i);
+ tcx.mk_array(self.ast_ty_to_ty(rscope, &ty), i as usize)
+ },
+ Ok(val) => {
+ span_err!(tcx.sess, ast_ty.span, E0249,
+ "expected usize value for array length, got {}",
+ val.description());
+ self.tcx().types.err
+ },
+ // array length errors happen before the global constant check
+ // so we need to report the real error
+ Err(ConstEvalErr { kind: ErroneousReferencedConstant(box r), ..}) |
+ Err(r) => {
+ let mut err = struct_span_err!(tcx.sess, r.span, E0250,
+ "array length constant \
+ evaluation error: {}",
+ r.description());
+ if !ast_ty.span.contains(r.span) {
+ span_note!(&mut err, ast_ty.span, "for array length here")
+ }
+ err.emit();
+ self.tcx().types.err
}
- err.emit();
- this.tcx().types.err
}
}
+ hir::TyTypeof(ref _e) => {
+ span_err!(tcx.sess, ast_ty.span, E0516,
+ "`typeof` is a reserved keyword but unimplemented");
+ tcx.types.err
+ }
+ hir::TyInfer => {
+ // TyInfer also appears as the type of arguments or return
+                // values in an ExprClosure, or as
+ // the type of local variables. Both of these cases are
+ // handled specially and will not descend into this routine.
+ self.ty_infer(None, None, None, ast_ty.span)
+ }
}
- hir::TyTypeof(ref _e) => {
- span_err!(tcx.sess, ast_ty.span, E0516,
- "`typeof` is a reserved keyword but unimplemented");
- tcx.types.err
- }
- hir::TyInfer => {
- // TyInfer also appears as the type of arguments or return
- // values in a ExprClosure, or as
- // the type of local variables. Both of these cases are
- // handled specially and will not descend into this routine.
- this.ty_infer(None, None, None, ast_ty.span)
- }
- };
-
- debug!("ast_ty_to_ty: id={:?} ty={:?}", ast_ty.id, typ);
- tcx.ast_ty_to_ty_cache.borrow_mut().insert(ast_ty.id, typ);
- return typ;
-}
-
-pub fn ty_of_arg<'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- a: &hir::Arg,
- expected_ty: Option<Ty<'tcx>>)
- -> Ty<'tcx>
-{
- match a.ty.node {
- hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
- hir::TyInfer => this.ty_infer(None, None, None, a.ty.span),
- _ => ast_ty_to_ty(this, rscope, &a.ty),
}
-}
-struct SelfInfo<'a, 'tcx> {
- untransformed_self_ty: Ty<'tcx>,
- explicit_self: &'a hir::ExplicitSelf,
-}
+ pub fn ty_of_arg(&self,
+ rscope: &RegionScope,
+ a: &hir::Arg,
+ expected_ty: Option<Ty<'tcx>>)
+ -> Ty<'tcx>
+ {
+ match a.ty.node {
+ hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
+ hir::TyInfer => self.ty_infer(None, None, None, a.ty.span),
+ _ => self.ast_ty_to_ty(rscope, &a.ty),
+ }
+ }
-pub fn ty_of_method<'tcx>(this: &AstConv<'tcx>,
- sig: &hir::MethodSig,
- untransformed_self_ty: Ty<'tcx>)
- -> (ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) {
- let self_info = Some(SelfInfo {
- untransformed_self_ty: untransformed_self_ty,
- explicit_self: &sig.explicit_self,
- });
- let (bare_fn_ty, optional_explicit_self_category) =
- ty_of_method_or_bare_fn(this,
- sig.unsafety,
- sig.abi,
- self_info,
- &sig.decl);
- (bare_fn_ty, optional_explicit_self_category.unwrap())
-}
+ pub fn ty_of_method(&self,
+ sig: &hir::MethodSig,
+ untransformed_self_ty: Ty<'tcx>)
+ -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) {
+ let self_info = Some(SelfInfo {
+ untransformed_self_ty: untransformed_self_ty,
+ explicit_self: &sig.explicit_self,
+ });
+ let (bare_fn_ty, optional_explicit_self_category) =
+ self.ty_of_method_or_bare_fn(sig.unsafety,
+ sig.abi,
+ self_info,
+ &sig.decl);
+ (bare_fn_ty, optional_explicit_self_category.unwrap())
+ }
-pub fn ty_of_bare_fn<'tcx>(this: &AstConv<'tcx>, unsafety: hir::Unsafety, abi: abi::Abi,
- decl: &hir::FnDecl) -> ty::BareFnTy<'tcx> {
- let (bare_fn_ty, _) = ty_of_method_or_bare_fn(this, unsafety, abi, None, decl);
- bare_fn_ty
-}
+ pub fn ty_of_bare_fn(&self,
+ unsafety: hir::Unsafety, abi: abi::Abi,
+ decl: &hir::FnDecl)
+ -> &'tcx ty::BareFnTy<'tcx> {
+ let (bare_fn_ty, _) = self.ty_of_method_or_bare_fn(unsafety, abi, None, decl);
+ bare_fn_ty
+ }
-fn ty_of_method_or_bare_fn<'a, 'tcx>(this: &AstConv<'tcx>,
- unsafety: hir::Unsafety,
- abi: abi::Abi,
- opt_self_info: Option<SelfInfo<'a, 'tcx>>,
- decl: &hir::FnDecl)
- -> (ty::BareFnTy<'tcx>, Option<ty::ExplicitSelfCategory>)
-{
- debug!("ty_of_method_or_bare_fn");
-
- // New region names that appear inside of the arguments of the function
- // declaration are bound to that function type.
- let rb = rscope::BindingRscope::new();
-
- // `implied_output_region` is the region that will be assumed for any
- // region parameters in the return type. In accordance with the rules for
- // lifetime elision, we can determine it in two ways. First (determined
- // here), if self is by-reference, then the implied output region is the
- // region of the self parameter.
- let (self_ty, explicit_self_category) = match opt_self_info {
- None => (None, None),
- Some(self_info) => determine_self_type(this, &rb, self_info)
- };
+ fn ty_of_method_or_bare_fn<'a>(&self,
+ unsafety: hir::Unsafety,
+ abi: abi::Abi,
+ opt_self_info: Option<SelfInfo<'a, 'tcx>>,
+ decl: &hir::FnDecl)
+ -> (&'tcx ty::BareFnTy<'tcx>,
+ Option<ty::ExplicitSelfCategory>)
+ {
+ debug!("ty_of_method_or_bare_fn");
+
+ // New region names that appear inside of the arguments of the function
+ // declaration are bound to that function type.
+ let rb = rscope::BindingRscope::new();
+
+ // `implied_output_region` is the region that will be assumed for any
+ // region parameters in the return type. In accordance with the rules for
+ // lifetime elision, we can determine it in two ways. First (determined
+ // here), if self is by-reference, then the implied output region is the
+ // region of the self parameter.
+ let (self_ty, explicit_self_category) = match opt_self_info {
+ None => (None, None),
+ Some(self_info) => self.determine_self_type(&rb, self_info)
+ };
- // HACK(eddyb) replace the fake self type in the AST with the actual type.
- let arg_params = if self_ty.is_some() {
- &decl.inputs[1..]
- } else {
- &decl.inputs[..]
- };
- let arg_tys: Vec<Ty> =
- arg_params.iter().map(|a| ty_of_arg(this, &rb, a, None)).collect();
- let arg_pats: Vec<String> =
- arg_params.iter().map(|a| pprust::pat_to_string(&a.pat)).collect();
-
- // Second, if there was exactly one lifetime (either a substitution or a
- // reference) in the arguments, then any anonymous regions in the output
- // have that lifetime.
- let implied_output_region = match explicit_self_category {
- Some(ty::ExplicitSelfCategory::ByReference(region, _)) => Ok(region),
- _ => find_implied_output_region(this.tcx(), &arg_tys, arg_pats)
- };
+ // HACK(eddyb) replace the fake self type in the AST with the actual type.
+ let arg_params = if self_ty.is_some() {
+ &decl.inputs[1..]
+ } else {
+ &decl.inputs[..]
+ };
+ let arg_tys: Vec<Ty> =
+ arg_params.iter().map(|a| self.ty_of_arg(&rb, a, None)).collect();
+ let arg_pats: Vec<String> =
+ arg_params.iter().map(|a| pprust::pat_to_string(&a.pat)).collect();
+
+ // Second, if there was exactly one lifetime (either a substitution or a
+ // reference) in the arguments, then any anonymous regions in the output
+ // have that lifetime.
+ let implied_output_region = match explicit_self_category {
+ Some(ty::ExplicitSelfCategory::ByReference(region, _)) => Ok(region),
+ _ => self.find_implied_output_region(&arg_tys, arg_pats)
+ };
- let output_ty = match decl.output {
- hir::Return(ref output) =>
- ty::FnConverging(convert_ty_with_lifetime_elision(this,
- implied_output_region,
- &output)),
- hir::DefaultReturn(..) => ty::FnConverging(this.tcx().mk_nil()),
- hir::NoReturn(..) => ty::FnDiverging
- };
+ let output_ty = match decl.output {
+ hir::Return(ref output) =>
+ ty::FnConverging(self.convert_ty_with_lifetime_elision(implied_output_region,
+ &output)),
+ hir::DefaultReturn(..) => ty::FnConverging(self.tcx().mk_nil()),
+ hir::NoReturn(..) => ty::FnDiverging
+ };
- (ty::BareFnTy {
- unsafety: unsafety,
- abi: abi,
- sig: ty::Binder(ty::FnSig {
- inputs: self_ty.into_iter().chain(arg_tys).collect(),
- output: output_ty,
- variadic: decl.variadic
- }),
- }, explicit_self_category)
-}
+ (self.tcx().mk_bare_fn(ty::BareFnTy {
+ unsafety: unsafety,
+ abi: abi,
+ sig: ty::Binder(ty::FnSig {
+ inputs: self_ty.into_iter().chain(arg_tys).collect(),
+ output: output_ty,
+ variadic: decl.variadic
+ }),
+ }), explicit_self_category)
+ }
-fn determine_self_type<'a, 'tcx>(this: &AstConv<'tcx>,
- rscope: &RegionScope,
- self_info: SelfInfo<'a, 'tcx>)
- -> (Option<Ty<'tcx>>, Option<ty::ExplicitSelfCategory>)
-{
- let self_ty = self_info.untransformed_self_ty;
- return match self_info.explicit_self.node {
- hir::SelfStatic => (None, Some(ty::ExplicitSelfCategory::Static)),
- hir::SelfValue(_) => {
- (Some(self_ty), Some(ty::ExplicitSelfCategory::ByValue))
- }
- hir::SelfRegion(ref lifetime, mutability, _) => {
- let region =
- opt_ast_region_to_region(this,
- rscope,
- self_info.explicit_self.span,
- lifetime);
- (Some(this.tcx().mk_ref(
- this.tcx().mk_region(region),
- ty::TypeAndMut {
- ty: self_ty,
- mutbl: mutability
- })),
- Some(ty::ExplicitSelfCategory::ByReference(region, mutability)))
- }
- hir::SelfExplicit(ref ast_type, _) => {
- let explicit_type = ast_ty_to_ty(this, rscope, &ast_type);
-
- // We wish to (for now) categorize an explicit self
- // declaration like `self: SomeType` into either `self`,
- // `&self`, `&mut self`, or `Box<self>`. We do this here
- // by some simple pattern matching. A more precise check
- // is done later in `check_method_self_type()`.
- //
- // Examples:
- //
- // ```
- // impl Foo for &T {
- // // Legal declarations:
- // fn method1(self: &&T); // ExplicitSelfCategory::ByReference
- // fn method2(self: &T); // ExplicitSelfCategory::ByValue
- // fn method3(self: Box<&T>); // ExplicitSelfCategory::ByBox
- //
- // // Invalid cases will be caught later by `check_method_self_type`:
- // fn method_err1(self: &mut T); // ExplicitSelfCategory::ByReference
- // }
- // ```
- //
- // To do the check we just count the number of "modifiers"
- // on each type and compare them. If they are the same or
- // the impl has more, we call it "by value". Otherwise, we
- // look at the outermost modifier on the method decl and
- // call it by-ref, by-box as appropriate. For method1, for
- // example, the impl type has one modifier, but the method
- // type has two, so we end up with
- // ExplicitSelfCategory::ByReference.
-
- let impl_modifiers = count_modifiers(self_info.untransformed_self_ty);
- let method_modifiers = count_modifiers(explicit_type);
-
- debug!("determine_explicit_self_category(self_info.untransformed_self_ty={:?} \
- explicit_type={:?} \
- modifiers=({},{})",
- self_info.untransformed_self_ty,
- explicit_type,
- impl_modifiers,
- method_modifiers);
-
- let category = if impl_modifiers >= method_modifiers {
- ty::ExplicitSelfCategory::ByValue
- } else {
- match explicit_type.sty {
- ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(*r, mt.mutbl),
- ty::TyBox(_) => ty::ExplicitSelfCategory::ByBox,
- _ => ty::ExplicitSelfCategory::ByValue,
- }
- };
+ fn determine_self_type<'a>(&self,
+ rscope: &RegionScope,
+ self_info: SelfInfo<'a, 'tcx>)
+ -> (Option<Ty<'tcx>>, Option<ty::ExplicitSelfCategory>)
+ {
+ let self_ty = self_info.untransformed_self_ty;
+ return match self_info.explicit_self.node {
+ hir::SelfStatic => (None, Some(ty::ExplicitSelfCategory::Static)),
+ hir::SelfValue(_) => {
+ (Some(self_ty), Some(ty::ExplicitSelfCategory::ByValue))
+ }
+ hir::SelfRegion(ref lifetime, mutability, _) => {
+ let region =
+ self.opt_ast_region_to_region(rscope,
+ self_info.explicit_self.span,
+ lifetime);
+ (Some(self.tcx().mk_ref(
+ self.tcx().mk_region(region),
+ ty::TypeAndMut {
+ ty: self_ty,
+ mutbl: mutability
+ })),
+ Some(ty::ExplicitSelfCategory::ByReference(region, mutability)))
+ }
+ hir::SelfExplicit(ref ast_type, _) => {
+ let explicit_type = self.ast_ty_to_ty(rscope, &ast_type);
+
+ // We wish to (for now) categorize an explicit self
+ // declaration like `self: SomeType` into either `self`,
+ // `&self`, `&mut self`, or `Box<self>`. We do this here
+ // by some simple pattern matching. A more precise check
+ // is done later in `check_method_self_type()`.
+ //
+ // Examples:
+ //
+ // ```
+ // impl Foo for &T {
+ // // Legal declarations:
+ // fn method1(self: &&T); // ExplicitSelfCategory::ByReference
+ // fn method2(self: &T); // ExplicitSelfCategory::ByValue
+ // fn method3(self: Box<&T>); // ExplicitSelfCategory::ByBox
+ //
+ // // Invalid cases will be caught later by `check_method_self_type`:
+ // fn method_err1(self: &mut T); // ExplicitSelfCategory::ByReference
+ // }
+ // ```
+ //
+ // To do the check we just count the number of "modifiers"
+ // on each type and compare them. If they are the same or
+ // the impl has more, we call it "by value". Otherwise, we
+ // look at the outermost modifier on the method decl and
+ // call it by-ref, by-box as appropriate. For method1, for
+ // example, the impl type has one modifier, but the method
+ // type has two, so we end up with
+ // ExplicitSelfCategory::ByReference.
+
+ let impl_modifiers = count_modifiers(self_info.untransformed_self_ty);
+ let method_modifiers = count_modifiers(explicit_type);
+
+ debug!("determine_explicit_self_category(self_info.untransformed_self_ty={:?} \
+ explicit_type={:?} \
+ modifiers=({},{})",
+ self_info.untransformed_self_ty,
+ explicit_type,
+ impl_modifiers,
+ method_modifiers);
+
+ let category = if impl_modifiers >= method_modifiers {
+ ty::ExplicitSelfCategory::ByValue
+ } else {
+ match explicit_type.sty {
+ ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(*r, mt.mutbl),
+ ty::TyBox(_) => ty::ExplicitSelfCategory::ByBox,
+ _ => ty::ExplicitSelfCategory::ByValue,
+ }
+ };
- (Some(explicit_type), Some(category))
- }
- };
+ (Some(explicit_type), Some(category))
+ }
+ };
- fn count_modifiers(ty: Ty) -> usize {
- match ty.sty {
- ty::TyRef(_, mt) => count_modifiers(mt.ty) + 1,
- ty::TyBox(t) => count_modifiers(t) + 1,
- _ => 0,
+ fn count_modifiers(ty: Ty) -> usize {
+ match ty.sty {
+ ty::TyRef(_, mt) => count_modifiers(mt.ty) + 1,
+ ty::TyBox(t) => count_modifiers(t) + 1,
+ _ => 0,
+ }
}
}
-}
-pub fn ty_of_closure<'tcx>(
- this: &AstConv<'tcx>,
- unsafety: hir::Unsafety,
- decl: &hir::FnDecl,
- abi: abi::Abi,
- expected_sig: Option<ty::FnSig<'tcx>>)
- -> ty::ClosureTy<'tcx>
-{
- debug!("ty_of_closure(expected_sig={:?})",
- expected_sig);
-
- // new region names that appear inside of the fn decl are bound to
- // that function type
- let rb = rscope::BindingRscope::new();
-
- let input_tys: Vec<_> = decl.inputs.iter().enumerate().map(|(i, a)| {
- let expected_arg_ty = expected_sig.as_ref().and_then(|e| {
- // no guarantee that the correct number of expected args
- // were supplied
- if i < e.inputs.len() {
- Some(e.inputs[i])
- } else {
- None
- }
- });
- ty_of_arg(this, &rb, a, expected_arg_ty)
- }).collect();
+ pub fn ty_of_closure(&self,
+ unsafety: hir::Unsafety,
+ decl: &hir::FnDecl,
+ abi: abi::Abi,
+ expected_sig: Option<ty::FnSig<'tcx>>)
+ -> ty::ClosureTy<'tcx>
+ {
+ debug!("ty_of_closure(expected_sig={:?})",
+ expected_sig);
+
+ // new region names that appear inside of the fn decl are bound to
+ // that function type
+ let rb = rscope::BindingRscope::new();
+
+ let input_tys: Vec<_> = decl.inputs.iter().enumerate().map(|(i, a)| {
+ let expected_arg_ty = expected_sig.as_ref().and_then(|e| {
+ // no guarantee that the correct number of expected args
+ // were supplied
+ if i < e.inputs.len() {
+ Some(e.inputs[i])
+ } else {
+ None
+ }
+ });
+ self.ty_of_arg(&rb, a, expected_arg_ty)
+ }).collect();
- let expected_ret_ty = expected_sig.map(|e| e.output);
+ let expected_ret_ty = expected_sig.map(|e| e.output);
- let is_infer = match decl.output {
- hir::Return(ref output) if output.node == hir::TyInfer => true,
- hir::DefaultReturn(..) => true,
- _ => false
- };
+ let is_infer = match decl.output {
+ hir::Return(ref output) if output.node == hir::TyInfer => true,
+ hir::DefaultReturn(..) => true,
+ _ => false
+ };
- let output_ty = match decl.output {
- _ if is_infer && expected_ret_ty.is_some() =>
- expected_ret_ty.unwrap(),
- _ if is_infer =>
- ty::FnConverging(this.ty_infer(None, None, None, decl.output.span())),
- hir::Return(ref output) =>
- ty::FnConverging(ast_ty_to_ty(this, &rb, &output)),
- hir::DefaultReturn(..) => bug!(),
- hir::NoReturn(..) => ty::FnDiverging
- };
+ let output_ty = match decl.output {
+ _ if is_infer && expected_ret_ty.is_some() =>
+ expected_ret_ty.unwrap(),
+ _ if is_infer =>
+ ty::FnConverging(self.ty_infer(None, None, None, decl.output.span())),
+ hir::Return(ref output) =>
+ ty::FnConverging(self.ast_ty_to_ty(&rb, &output)),
+ hir::DefaultReturn(..) => bug!(),
+ hir::NoReturn(..) => ty::FnDiverging
+ };
- debug!("ty_of_closure: input_tys={:?}", input_tys);
- debug!("ty_of_closure: output_ty={:?}", output_ty);
+ debug!("ty_of_closure: input_tys={:?}", input_tys);
+ debug!("ty_of_closure: output_ty={:?}", output_ty);
- ty::ClosureTy {
- unsafety: unsafety,
- abi: abi,
- sig: ty::Binder(ty::FnSig {inputs: input_tys,
- output: output_ty,
- variadic: decl.variadic}),
+ ty::ClosureTy {
+ unsafety: unsafety,
+ abi: abi,
+ sig: ty::Binder(ty::FnSig {inputs: input_tys,
+ output: output_ty,
+ variadic: decl.variadic}),
+ }
}
-}
-
-/// Given an existential type like `Foo+'a+Bar`, this routine converts the `'a` and `Bar` intos an
-/// `ExistentialBounds` struct. The `main_trait_refs` argument specifies the `Foo` -- it is absent
-/// for closures. Eventually this should all be normalized, I think, so that there is no "main
-/// trait ref" and instead we just have a flat list of bounds as the existential type.
-fn conv_existential_bounds<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- principal_trait_ref: ty::PolyTraitRef<'tcx>,
- projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
- ast_bounds: &[hir::TyParamBound])
- -> ty::ExistentialBounds<'tcx>
-{
- let partitioned_bounds =
- partition_bounds(this.tcx(), span, ast_bounds);
- conv_existential_bounds_from_partitioned_bounds(
- this, rscope, span, principal_trait_ref, projection_bounds, partitioned_bounds)
-}
+ /// Given an existential type like `Foo+'a+Bar`, this routine converts
+ /// the `'a` and `Bar` intos an `ExistentialBounds` struct.
+ /// The `main_trait_refs` argument specifies the `Foo` -- it is absent
+ /// for closures. Eventually this should all be normalized, I think,
+ /// so that there is no "main trait ref" and instead we just have a flat
+ /// list of bounds as the existential type.
+ fn conv_existential_bounds(&self,
+ rscope: &RegionScope,
+ span: Span,
+ principal_trait_ref: ty::PolyTraitRef<'tcx>,
+ projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
+ ast_bounds: &[hir::TyParamBound])
+ -> ty::ExistentialBounds<'tcx>
+ {
+ let partitioned_bounds =
+ partition_bounds(self.tcx(), span, ast_bounds);
-fn conv_ty_poly_trait_ref<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- ast_bounds: &[hir::TyParamBound])
- -> Ty<'tcx>
-{
- let mut partitioned_bounds = partition_bounds(this.tcx(), span, &ast_bounds[..]);
-
- let mut projection_bounds = Vec::new();
- let main_trait_bound = if !partitioned_bounds.trait_bounds.is_empty() {
- let trait_bound = partitioned_bounds.trait_bounds.remove(0);
- instantiate_poly_trait_ref(this,
- rscope,
- trait_bound,
- None,
- &mut projection_bounds)
- } else {
- span_err!(this.tcx().sess, span, E0224,
- "at least one non-builtin trait is required for an object type");
- return this.tcx().types.err;
- };
+ self.conv_existential_bounds_from_partitioned_bounds(
+ rscope, span, principal_trait_ref, projection_bounds, partitioned_bounds)
+ }
- let bounds =
- conv_existential_bounds_from_partitioned_bounds(this,
- rscope,
- span,
- main_trait_bound.clone(),
- projection_bounds,
- partitioned_bounds);
+ fn conv_ty_poly_trait_ref(&self,
+ rscope: &RegionScope,
+ span: Span,
+ ast_bounds: &[hir::TyParamBound])
+ -> Ty<'tcx>
+ {
+ let mut partitioned_bounds = partition_bounds(self.tcx(), span, &ast_bounds[..]);
+
+ let mut projection_bounds = Vec::new();
+ let main_trait_bound = if !partitioned_bounds.trait_bounds.is_empty() {
+ let trait_bound = partitioned_bounds.trait_bounds.remove(0);
+ self.instantiate_poly_trait_ref(rscope,
+ trait_bound,
+ None,
+ &mut projection_bounds)
+ } else {
+ span_err!(self.tcx().sess, span, E0224,
+ "at least one non-builtin trait is required for an object type");
+ return self.tcx().types.err;
+ };
- make_object_type(this, span, main_trait_bound, bounds)
-}
+ let bounds =
+ self.conv_existential_bounds_from_partitioned_bounds(rscope,
+ span,
+ main_trait_bound.clone(),
+ projection_bounds,
+ partitioned_bounds);
-pub fn conv_existential_bounds_from_partitioned_bounds<'tcx>(
- this: &AstConv<'tcx>,
- rscope: &RegionScope,
- span: Span,
- principal_trait_ref: ty::PolyTraitRef<'tcx>,
- projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>, // Empty for boxed closures
- partitioned_bounds: PartitionedBounds)
- -> ty::ExistentialBounds<'tcx>
-{
- let PartitionedBounds { builtin_bounds,
- trait_bounds,
- region_bounds } =
- partitioned_bounds;
-
- if !trait_bounds.is_empty() {
- let b = &trait_bounds[0];
- span_err!(this.tcx().sess, b.trait_ref.path.span, E0225,
- "only the builtin traits can be used as closure or object bounds");
+ self.make_object_type(span, main_trait_bound, bounds)
}
- let region_bound =
- compute_object_lifetime_bound(this,
- span,
- ®ion_bounds,
- principal_trait_ref,
- builtin_bounds);
-
- let region_bound = match region_bound {
- Some(r) => r,
- None => {
- match rscope.object_lifetime_default(span) {
- Some(r) => r,
- None => {
- span_err!(this.tcx().sess, span, E0228,
- "the lifetime bound for this object type cannot be deduced \
- from context; please supply an explicit bound");
- ty::ReStatic
+ pub fn conv_existential_bounds_from_partitioned_bounds(&self,
+ rscope: &RegionScope,
+ span: Span,
+ principal_trait_ref: ty::PolyTraitRef<'tcx>,
+ projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>, // Empty for boxed closures
+ partitioned_bounds: PartitionedBounds)
+ -> ty::ExistentialBounds<'tcx>
+ {
+ let PartitionedBounds { builtin_bounds,
+ trait_bounds,
+ region_bounds } =
+ partitioned_bounds;
+
+ if !trait_bounds.is_empty() {
+ let b = &trait_bounds[0];
+ span_err!(self.tcx().sess, b.trait_ref.path.span, E0225,
+ "only the builtin traits can be used as closure or object bounds");
+ }
+
+ let region_bound =
+ self.compute_object_lifetime_bound(span,
+ ®ion_bounds,
+ principal_trait_ref,
+ builtin_bounds);
+
+ let region_bound = match region_bound {
+ Some(r) => r,
+ None => {
+ match rscope.object_lifetime_default(span) {
+ Some(r) => r,
+ None => {
+ span_err!(self.tcx().sess, span, E0228,
+ "the lifetime bound for this object type cannot be deduced \
+ from context; please supply an explicit bound");
+ ty::ReStatic
+ }
}
}
- }
- };
+ };
- debug!("region_bound: {:?}", region_bound);
+ debug!("region_bound: {:?}", region_bound);
- ty::ExistentialBounds::new(region_bound, builtin_bounds, projection_bounds)
-}
+ ty::ExistentialBounds::new(region_bound, builtin_bounds, projection_bounds)
+ }
-/// Given the bounds on an object, determines what single region bound
-/// (if any) we can use to summarize this type. The basic idea is that we will use the bound the
-/// user provided, if they provided one, and otherwise search the supertypes of trait bounds for
-/// region bounds. It may be that we can derive no bound at all, in which case we return `None`.
-fn compute_object_lifetime_bound<'tcx>(
- this: &AstConv<'tcx>,
- span: Span,
- explicit_region_bounds: &[&hir::Lifetime],
- principal_trait_ref: ty::PolyTraitRef<'tcx>,
- builtin_bounds: ty::BuiltinBounds)
- -> Option<ty::Region> // if None, use the default
-{
- let tcx = this.tcx();
+ /// Given the bounds on an object, determines what single region bound (if any) we can
+ /// use to summarize this type. The basic idea is that we will use the bound the user
+ /// provided, if they provided one, and otherwise search the supertypes of trait bounds
+ /// for region bounds. It may be that we can derive no bound at all, in which case
+ /// we return `None`.
+ fn compute_object_lifetime_bound(&self,
+ span: Span,
+ explicit_region_bounds: &[&hir::Lifetime],
+ principal_trait_ref: ty::PolyTraitRef<'tcx>,
+ builtin_bounds: ty::BuiltinBounds)
+ -> Option<ty::Region> // if None, use the default
+ {
+ let tcx = self.tcx();
- debug!("compute_opt_region_bound(explicit_region_bounds={:?}, \
- principal_trait_ref={:?}, builtin_bounds={:?})",
- explicit_region_bounds,
- principal_trait_ref,
- builtin_bounds);
+ debug!("compute_opt_region_bound(explicit_region_bounds={:?}, \
+ principal_trait_ref={:?}, builtin_bounds={:?})",
+ explicit_region_bounds,
+ principal_trait_ref,
+ builtin_bounds);
- if explicit_region_bounds.len() > 1 {
- span_err!(tcx.sess, explicit_region_bounds[1].span, E0226,
- "only a single explicit lifetime bound is permitted");
- }
+ if explicit_region_bounds.len() > 1 {
+ span_err!(tcx.sess, explicit_region_bounds[1].span, E0226,
+ "only a single explicit lifetime bound is permitted");
+ }
- if !explicit_region_bounds.is_empty() {
- // Explicitly specified region bound. Use that.
- let r = explicit_region_bounds[0];
- return Some(ast_region_to_region(tcx, r));
- }
+ if !explicit_region_bounds.is_empty() {
+ // Explicitly specified region bound. Use that.
+ let r = explicit_region_bounds[0];
+ return Some(ast_region_to_region(tcx, r));
+ }
- if let Err(ErrorReported) = this.ensure_super_predicates(span,principal_trait_ref.def_id()) {
- return Some(ty::ReStatic);
- }
+ if let Err(ErrorReported) =
+ self.ensure_super_predicates(span, principal_trait_ref.def_id()) {
+ return Some(ty::ReStatic);
+ }
- // No explicit region bound specified. Therefore, examine trait
- // bounds and see if we can derive region bounds from those.
- let derived_region_bounds =
- object_region_bounds(tcx, &principal_trait_ref, builtin_bounds);
+ // No explicit region bound specified. Therefore, examine trait
+ // bounds and see if we can derive region bounds from those.
+ let derived_region_bounds =
+ object_region_bounds(tcx, &principal_trait_ref, builtin_bounds);
- // If there are no derived region bounds, then report back that we
- // can find no region bound. The caller will use the default.
- if derived_region_bounds.is_empty() {
- return None;
- }
+ // If there are no derived region bounds, then report back that we
+ // can find no region bound. The caller will use the default.
+ if derived_region_bounds.is_empty() {
+ return None;
+ }
- // If any of the derived region bounds are 'static, that is always
- // the best choice.
- if derived_region_bounds.iter().any(|r| ty::ReStatic == *r) {
- return Some(ty::ReStatic);
- }
+ // If any of the derived region bounds are 'static, that is always
+ // the best choice.
+ if derived_region_bounds.iter().any(|r| ty::ReStatic == *r) {
+ return Some(ty::ReStatic);
+ }
- // Determine whether there is exactly one unique region in the set
- // of derived region bounds. If so, use that. Otherwise, report an
- // error.
- let r = derived_region_bounds[0];
- if derived_region_bounds[1..].iter().any(|r1| r != *r1) {
- span_err!(tcx.sess, span, E0227,
- "ambiguous lifetime bound, explicit lifetime bound required");
+ // Determine whether there is exactly one unique region in the set
+ // of derived region bounds. If so, use that. Otherwise, report an
+ // error.
+ let r = derived_region_bounds[0];
+ if derived_region_bounds[1..].iter().any(|r1| r != *r1) {
+ span_err!(tcx.sess, span, E0227,
+ "ambiguous lifetime bound, explicit lifetime bound required");
+ }
+ return Some(r);
}
- return Some(r);
}
pub struct PartitionedBounds<'a> {
/// Divides a list of bounds from the AST into three groups: builtin bounds (Copy, Sized etc),
/// general trait bounds, and region bounds.
-pub fn partition_bounds<'a>(tcx: &TyCtxt,
- _span: Span,
- ast_bounds: &'a [hir::TyParamBound])
- -> PartitionedBounds<'a>
+pub fn partition_bounds<'a, 'b, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ _span: Span,
+ ast_bounds: &'b [hir::TyParamBound])
+ -> PartitionedBounds<'b>
{
let mut builtin_bounds = ty::BuiltinBounds::empty();
let mut region_bounds = Vec::new();
}
}
-fn prohibit_projections<'tcx>(tcx: &TyCtxt<'tcx>,
- bindings: &[ConvertedBinding<'tcx>])
-{
- for binding in bindings.iter().take(1) {
- prohibit_projection(tcx, binding.span);
- }
-}
-
-fn check_type_argument_count(tcx: &TyCtxt, span: Span, supplied: usize,
+fn check_type_argument_count(tcx: TyCtxt, span: Span, supplied: usize,
required: usize, accepted: usize) {
if supplied < required {
let expected = if required < accepted {
}
}
-fn report_lifetime_number_error(tcx: &TyCtxt, span: Span, number: usize, expected: usize) {
+fn report_lifetime_number_error(tcx: TyCtxt, span: Span, number: usize, expected: usize) {
span_err!(tcx.sess, span, E0107,
"wrong number of lifetime parameters: expected {}, found {}",
expected, number);
pub projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
}
-impl<'tcx> Bounds<'tcx> {
- pub fn predicates(&self,
- tcx: &TyCtxt<'tcx>,
- param_ty: Ty<'tcx>)
- -> Vec<ty::Predicate<'tcx>>
+impl<'a, 'gcx, 'tcx> Bounds<'tcx> {
+ pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, param_ty: Ty<'tcx>)
+ -> Vec<ty::Predicate<'tcx>>
{
let mut vec = Vec::new();
for builtin_bound in &self.builtin_bounds {
- match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty) {
+ match tcx.trait_ref_for_builtin_bound(builtin_bound, param_ty) {
Ok(trait_ref) => { vec.push(trait_ref.to_predicate()); }
Err(ErrorReported) => { }
}
use hir::pat_util::pat_is_resolved_const;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference};
-use check::{check_expr, check_expr_has_type, check_expr_with_expectation};
-use check::{demand, FnCtxt, Expectation};
-use check::{check_expr_with_lvalue_pref};
-use check::{instantiate_path, resolve_ty_and_def_ufcs, structurally_resolved_type};
-use check::coercion;
+use check::{FnCtxt, Expectation};
use lint;
-use require_same_types;
use util::nodemap::FnvHashMap;
use session::Session;
use std::cmp;
use std::collections::hash_map::Entry::{Occupied, Vacant};
+use std::ops::Deref;
use syntax::ast;
use syntax::codemap::{Span, Spanned};
use syntax::ptr::P;
use rustc::hir::{self, PatKind};
use rustc::hir::print as pprust;
-pub fn check_pat<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
- pat: &'tcx hir::Pat,
- expected: Ty<'tcx>)
-{
- let fcx = pcx.fcx;
- let tcx = pcx.fcx.ccx.tcx;
+pub struct PatCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
+ pub map: PatIdMap,
+}
- debug!("check_pat(pat={:?},expected={:?})",
- pat,
- expected);
+impl<'a, 'gcx, 'tcx> Deref for PatCtxt<'a, 'gcx, 'tcx> {
+ type Target = FnCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ self.fcx
+ }
+}
- match pat.node {
- PatKind::Wild => {
- fcx.write_ty(pat.id, expected);
- }
- PatKind::Lit(ref lt) => {
- check_expr(fcx, <);
- let expr_ty = fcx.expr_ty(<);
-
- // Byte string patterns behave the same way as array patterns
- // They can denote both statically and dynamically sized byte arrays
- let mut pat_ty = expr_ty;
- if let hir::ExprLit(ref lt) = lt.node {
- if let ast::LitKind::ByteStr(_) = lt.node {
- let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
- if let ty::TyRef(_, mt) = expected_ty.sty {
- if let ty::TySlice(_) = mt.ty.sty {
- pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
- tcx.mk_slice(tcx.types.u8))
+// This function exists due to the warning "diagnostic code E0164 already used"
+fn bad_struct_kind_err(sess: &Session, pat: &hir::Pat, path: &hir::Path, lint: bool) {
+ let name = pprust::path_to_string(path);
+ let msg = format!("`{}` does not name a tuple variant or a tuple struct", name);
+ if lint {
+ sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
+ pat.id,
+ pat.span,
+ msg);
+ } else {
+ span_err!(sess, pat.span, E0164, "{}", msg);
+ }
+}
+
+impl<'a, 'gcx, 'tcx> PatCtxt<'a, 'gcx, 'tcx> {
+ pub fn check_pat(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>) {
+ let tcx = self.tcx;
+
+ debug!("check_pat(pat={:?},expected={:?})", pat, expected);
+
+ match pat.node {
+ PatKind::Wild => {
+ self.write_ty(pat.id, expected);
+ }
+ PatKind::Lit(ref lt) => {
+ self.check_expr(<);
+ let expr_ty = self.expr_ty(<);
+
+ // Byte string patterns behave the same way as array patterns
+ // They can denote both statically and dynamically sized byte arrays
+ let mut pat_ty = expr_ty;
+ if let hir::ExprLit(ref lt) = lt.node {
+ if let ast::LitKind::ByteStr(_) = lt.node {
+ let expected_ty = self.structurally_resolved_type(pat.span, expected);
+ if let ty::TyRef(_, mt) = expected_ty.sty {
+ if let ty::TySlice(_) = mt.ty.sty {
+ pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_slice(tcx.types.u8))
+ }
}
}
}
- }
- fcx.write_ty(pat.id, pat_ty);
-
- // somewhat surprising: in this case, the subtyping
- // relation goes the opposite way as the other
- // cases. Actually what we really want is not a subtyping
- // relation at all but rather that there exists a LUB (so
- // that they can be compared). However, in practice,
- // constants are always scalars or strings. For scalars
- // subtyping is irrelevant, and for strings `expr_ty` is
- // type is `&'static str`, so if we say that
- //
- // &'static str <: expected
- //
- // that's equivalent to there existing a LUB.
- demand::suptype(fcx, pat.span, expected, pat_ty);
- }
- PatKind::Range(ref begin, ref end) => {
- check_expr(fcx, begin);
- check_expr(fcx, end);
-
- let lhs_ty = fcx.expr_ty(begin);
- let rhs_ty = fcx.expr_ty(end);
-
- // Check that both end-points are of numeric or char type.
- let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char();
- let lhs_compat = numeric_or_char(lhs_ty);
- let rhs_compat = numeric_or_char(rhs_ty);
-
- if !lhs_compat || !rhs_compat {
- let span = if !lhs_compat && !rhs_compat {
- pat.span
- } else if !lhs_compat {
- begin.span
- } else {
- end.span
- };
-
- // Note: spacing here is intentional, we want a space before "start" and "end".
- span_err!(tcx.sess, span, E0029,
- "only char and numeric types are allowed in range patterns\n \
- start type: {}\n end type: {}",
- fcx.infcx().ty_to_string(lhs_ty),
- fcx.infcx().ty_to_string(rhs_ty)
- );
- return;
+ self.write_ty(pat.id, pat_ty);
+
+ // somewhat surprising: in this case, the subtyping
+ // relation goes the opposite way as the other
+ // cases. Actually what we really want is not a subtyping
+ // relation at all but rather that there exists a LUB (so
+ // that they can be compared). However, in practice,
+ // constants are always scalars or strings. For scalars
+                // subtyping is irrelevant, and for strings the type of
+                // `expr_ty` is `&'static str`, so if we say that
+ //
+ // &'static str <: expected
+ //
+ // that's equivalent to there existing a LUB.
+ self.demand_suptype(pat.span, expected, pat_ty);
}
+ PatKind::Range(ref begin, ref end) => {
+ self.check_expr(begin);
+ self.check_expr(end);
+
+ let lhs_ty = self.expr_ty(begin);
+ let rhs_ty = self.expr_ty(end);
+
+ // Check that both end-points are of numeric or char type.
+ let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char();
+ let lhs_compat = numeric_or_char(lhs_ty);
+ let rhs_compat = numeric_or_char(rhs_ty);
+
+ if !lhs_compat || !rhs_compat {
+ let span = if !lhs_compat && !rhs_compat {
+ pat.span
+ } else if !lhs_compat {
+ begin.span
+ } else {
+ end.span
+ };
+
+ // Note: spacing here is intentional, we want a space before "start" and "end".
+ span_err!(tcx.sess, span, E0029,
+ "only char and numeric types are allowed in range patterns\n \
+ start type: {}\n end type: {}",
+ self.ty_to_string(lhs_ty),
+ self.ty_to_string(rhs_ty)
+ );
+ return;
+ }
- // Check that the types of the end-points can be unified.
- let types_unify = require_same_types(
- tcx, Some(fcx.infcx()), false, pat.span, rhs_ty, lhs_ty,
- "mismatched types in range",
- );
+ // Check that the types of the end-points can be unified.
+ let types_unify = self.require_same_types(pat.span, rhs_ty, lhs_ty,
+ "mismatched types in range");
- // It's ok to return without a message as `require_same_types` prints an error.
- if !types_unify {
- return;
- }
+ // It's ok to return without a message as `require_same_types` prints an error.
+ if !types_unify {
+ return;
+ }
- // Now that we know the types can be unified we find the unified type and use
- // it to type the entire expression.
- let common_type = fcx.infcx().resolve_type_vars_if_possible(&lhs_ty);
+ // Now that we know the types can be unified we find the unified type and use
+ // it to type the entire expression.
+ let common_type = self.resolve_type_vars_if_possible(&lhs_ty);
- fcx.write_ty(pat.id, common_type);
+ self.write_ty(pat.id, common_type);
- // subtyping doesn't matter here, as the value is some kind of scalar
- demand::eqtype(fcx, pat.span, expected, lhs_ty);
- }
- PatKind::Path(..) | PatKind::Ident(..)
- if pat_is_resolved_const(&tcx.def_map.borrow(), pat) => {
- if let Some(pat_def) = tcx.def_map.borrow().get(&pat.id) {
- let const_did = pat_def.def_id();
- let const_scheme = tcx.lookup_item_type(const_did);
- assert!(const_scheme.generics.is_empty());
- let const_ty = pcx.fcx.instantiate_type_scheme(pat.span,
- &Substs::empty(),
- &const_scheme.ty);
- fcx.write_ty(pat.id, const_ty);
-
- // FIXME(#20489) -- we should limit the types here to scalars or something!
-
- // As with PatKind::Lit, what we really want here is that there
- // exist a LUB, but for the cases that can occur, subtype
- // is good enough.
- demand::suptype(fcx, pat.span, expected, const_ty);
- } else {
- fcx.write_error(pat.id);
+ // subtyping doesn't matter here, as the value is some kind of scalar
+ self.demand_eqtype(pat.span, expected, lhs_ty);
}
- }
- PatKind::Ident(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map.borrow(), pat) => {
- let typ = fcx.local_ty(pat.span, pat.id);
- match bm {
- hir::BindByRef(mutbl) => {
- // if the binding is like
- // ref x | ref const x | ref mut x
- // then `x` is assigned a value of type `&M T` where M is the mutability
- // and T is the expected type.
- let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
- let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
-
- // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
- // required. However, we use equality, which is stronger. See (*) for
- // an explanation.
- demand::eqtype(fcx, pat.span, region_ty, typ);
- }
- // otherwise the type of x is the expected type T
- hir::BindByValue(_) => {
- // As above, `T <: typeof(x)` is required but we
- // use equality, see (*) below.
- demand::eqtype(fcx, pat.span, expected, typ);
+ PatKind::Path(..) | PatKind::Ident(..)
+ if pat_is_resolved_const(&tcx.def_map.borrow(), pat) => {
+ if let Some(pat_def) = tcx.def_map.borrow().get(&pat.id) {
+ let const_did = pat_def.def_id();
+ let const_scheme = tcx.lookup_item_type(const_did);
+ assert!(const_scheme.generics.is_empty());
+ let const_ty = self.instantiate_type_scheme(pat.span,
+ &Substs::empty(),
+ &const_scheme.ty);
+ self.write_ty(pat.id, const_ty);
+
+ // FIXME(#20489) -- we should limit the types here to scalars or something!
+
+ // As with PatKind::Lit, what we really want here is that there
+ // exist a LUB, but for the cases that can occur, subtype
+ // is good enough.
+ self.demand_suptype(pat.span, expected, const_ty);
+ } else {
+ self.write_error(pat.id);
}
}
+ PatKind::Ident(bm, ref path, ref sub)
+ if pat_is_binding(&tcx.def_map.borrow(), pat) => {
+ let typ = self.local_ty(pat.span, pat.id);
+ match bm {
+ hir::BindByRef(mutbl) => {
+ // if the binding is like
+ // ref x | ref const x | ref mut x
+ // then `x` is assigned a value of type `&M T` where M is the mutability
+ // and T is the expected type.
+ let region_var = self.next_region_var(infer::PatternRegion(pat.span));
+ let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
+ let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
+
+ // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
+ // required. However, we use equality, which is stronger. See (*) for
+ // an explanation.
+ self.demand_eqtype(pat.span, region_ty, typ);
+ }
+ // otherwise the type of x is the expected type T
+ hir::BindByValue(_) => {
+ // As above, `T <: typeof(x)` is required but we
+ // use equality, see (*) below.
+ self.demand_eqtype(pat.span, expected, typ);
+ }
+ }
- fcx.write_ty(pat.id, typ);
+ self.write_ty(pat.id, typ);
- // if there are multiple arms, make sure they all agree on
- // what the type of the binding `x` ought to be
- if let Some(&canon_id) = pcx.map.get(&path.node.name) {
- if canon_id != pat.id {
- let ct = fcx.local_ty(pat.span, canon_id);
- demand::eqtype(fcx, pat.span, ct, typ);
- }
+ // if there are multiple arms, make sure they all agree on
+ // what the type of the binding `x` ought to be
+ if let Some(&canon_id) = self.map.get(&path.node.name) {
+ if canon_id != pat.id {
+ let ct = self.local_ty(pat.span, canon_id);
+ self.demand_eqtype(pat.span, ct, typ);
+ }
- if let Some(ref p) = *sub {
- check_pat(pcx, &p, expected);
+ if let Some(ref p) = *sub {
+ self.check_pat(&p, expected);
+ }
}
}
- }
- PatKind::Ident(_, ref path, _) => {
- let path = hir::Path::from_ident(path.span, path.node);
- check_pat_enum(pcx, pat, &path, Some(&[]), expected, false);
- }
- PatKind::TupleStruct(ref path, ref subpats) => {
- check_pat_enum(pcx, pat, path, subpats.as_ref().map(|v| &v[..]), expected, true);
- }
- PatKind::Path(ref path) => {
- check_pat_enum(pcx, pat, path, Some(&[]), expected, false);
- }
- PatKind::QPath(ref qself, ref path) => {
- let self_ty = fcx.to_ty(&qself.ty);
- let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) {
- if d.base_def == Def::Err {
- fcx.infcx().set_tainted_by_errors();
- fcx.write_error(pat.id);
+ PatKind::Ident(_, ref path, _) => {
+ let path = hir::Path::from_ident(path.span, path.node);
+ self.check_pat_enum(pat, &path, Some(&[]), expected, false);
+ }
+ PatKind::TupleStruct(ref path, ref subpats) => {
+ self.check_pat_enum(pat, path, subpats.as_ref().map(|v| &v[..]), expected, true);
+ }
+ PatKind::Path(ref path) => {
+ self.check_pat_enum(pat, path, Some(&[]), expected, false);
+ }
+ PatKind::QPath(ref qself, ref path) => {
+ let self_ty = self.to_ty(&qself.ty);
+ let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) {
+ if d.base_def == Def::Err {
+ self.set_tainted_by_errors();
+ self.write_error(pat.id);
+ return;
+ }
+ d
+ } else if qself.position == 0 {
+ // This is just a sentinel for finish_resolving_def_to_ty.
+ let sentinel = self.tcx.map.local_def_id(ast::CRATE_NODE_ID);
+ def::PathResolution {
+ base_def: Def::Mod(sentinel),
+ depth: path.segments.len()
+ }
+ } else {
+ debug!("unbound path {:?}", pat);
+ self.write_error(pat.id);
return;
+ };
+ if let Some((opt_ty, segments, def)) =
+ self.resolve_ty_and_def_ufcs(path_res, Some(self_ty),
+ path, pat.span, pat.id) {
+ if self.check_assoc_item_is_const(def, pat.span) {
+ let scheme = tcx.lookup_item_type(def.def_id());
+ let predicates = tcx.lookup_predicates(def.def_id());
+ self.instantiate_path(segments, scheme, &predicates,
+ opt_ty, def, pat.span, pat.id);
+ let const_ty = self.node_ty(pat.id);
+ self.demand_suptype(pat.span, expected, const_ty);
+ } else {
+ self.write_error(pat.id)
+ }
}
- d
- } else if qself.position == 0 {
- // This is just a sentinel for finish_resolving_def_to_ty.
- let sentinel = fcx.tcx().map.local_def_id(ast::CRATE_NODE_ID);
- def::PathResolution {
- base_def: Def::Mod(sentinel),
- depth: path.segments.len()
+ }
+ PatKind::Struct(ref path, ref fields, etc) => {
+ self.check_pat_struct(pat, path, fields, etc, expected);
+ }
+ PatKind::Tup(ref elements) => {
+ let element_tys: Vec<_> =
+ (0..elements.len()).map(|_| self.next_ty_var()).collect();
+ let pat_ty = tcx.mk_tup(element_tys.clone());
+ self.write_ty(pat.id, pat_ty);
+ self.demand_eqtype(pat.span, expected, pat_ty);
+ for (element_pat, element_ty) in elements.iter().zip(element_tys) {
+ self.check_pat(&element_pat, element_ty);
}
- } else {
- debug!("unbound path {:?}", pat);
- fcx.write_error(pat.id);
- return;
- };
- if let Some((opt_ty, segments, def)) =
- resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty),
- path, pat.span, pat.id) {
- if check_assoc_item_is_const(pcx, def, pat.span) {
- let scheme = tcx.lookup_item_type(def.def_id());
- let predicates = tcx.lookup_predicates(def.def_id());
- instantiate_path(fcx, segments,
- scheme, &predicates,
- opt_ty, def, pat.span, pat.id);
- let const_ty = fcx.node_ty(pat.id);
- demand::suptype(fcx, pat.span, expected, const_ty);
+ }
+ PatKind::Box(ref inner) => {
+ let inner_ty = self.next_ty_var();
+ let uniq_ty = tcx.mk_box(inner_ty);
+
+ if self.check_dereferencable(pat.span, expected, &inner) {
+ // Here, `demand::subtype` is good enough, but I don't
+ // think any errors can be introduced by using
+ // `demand::eqtype`.
+ self.demand_eqtype(pat.span, expected, uniq_ty);
+ self.write_ty(pat.id, uniq_ty);
+ self.check_pat(&inner, inner_ty);
} else {
- fcx.write_error(pat.id)
+ self.write_error(pat.id);
+ self.check_pat(&inner, tcx.types.err);
}
}
- }
- PatKind::Struct(ref path, ref fields, etc) => {
- check_pat_struct(pcx, pat, path, fields, etc, expected);
- }
- PatKind::Tup(ref elements) => {
- let element_tys: Vec<_> =
- (0..elements.len()).map(|_| fcx.infcx().next_ty_var())
- .collect();
- let pat_ty = tcx.mk_tup(element_tys.clone());
- fcx.write_ty(pat.id, pat_ty);
- demand::eqtype(fcx, pat.span, expected, pat_ty);
- for (element_pat, element_ty) in elements.iter().zip(element_tys) {
- check_pat(pcx, &element_pat, element_ty);
- }
- }
- PatKind::Box(ref inner) => {
- let inner_ty = fcx.infcx().next_ty_var();
- let uniq_ty = tcx.mk_box(inner_ty);
-
- if check_dereferencable(pcx, pat.span, expected, &inner) {
- // Here, `demand::subtype` is good enough, but I don't
- // think any errors can be introduced by using
- // `demand::eqtype`.
- demand::eqtype(fcx, pat.span, expected, uniq_ty);
- fcx.write_ty(pat.id, uniq_ty);
- check_pat(pcx, &inner, inner_ty);
- } else {
- fcx.write_error(pat.id);
- check_pat(pcx, &inner, tcx.types.err);
- }
- }
- PatKind::Ref(ref inner, mutbl) => {
- let expected = fcx.infcx().shallow_resolve(expected);
- if check_dereferencable(pcx, pat.span, expected, &inner) {
- // `demand::subtype` would be good enough, but using
- // `eqtype` turns out to be equally general. See (*)
- // below for details.
+ PatKind::Ref(ref inner, mutbl) => {
+ let expected = self.shallow_resolve(expected);
+ if self.check_dereferencable(pat.span, expected, &inner) {
+ // `demand::subtype` would be good enough, but using
+ // `eqtype` turns out to be equally general. See (*)
+ // below for details.
+
+ // Take region, inner-type from expected type if we
+ // can, to avoid creating needless variables. This
+ // also helps with the bad interactions of the given
+ // hack detailed in (*) below.
+ let (rptr_ty, inner_ty) = match expected.sty {
+ ty::TyRef(_, mt) if mt.mutbl == mutbl => {
+ (expected, mt.ty)
+ }
+ _ => {
+ let inner_ty = self.next_ty_var();
+ let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
+ let region = self.next_region_var(infer::PatternRegion(pat.span));
+ let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
+ self.demand_eqtype(pat.span, expected, rptr_ty);
+ (rptr_ty, inner_ty)
+ }
+ };
- // Take region, inner-type from expected type if we
- // can, to avoid creating needless variables. This
- // also helps with the bad interactions of the given
- // hack detailed in (*) below.
- let (rptr_ty, inner_ty) = match expected.sty {
- ty::TyRef(_, mt) if mt.mutbl == mutbl => {
- (expected, mt.ty)
- }
+ self.write_ty(pat.id, rptr_ty);
+ self.check_pat(&inner, inner_ty);
+ } else {
+ self.write_error(pat.id);
+ self.check_pat(&inner, tcx.types.err);
+ }
+ }
+ PatKind::Vec(ref before, ref slice, ref after) => {
+ let expected_ty = self.structurally_resolved_type(pat.span, expected);
+ let inner_ty = self.next_ty_var();
+ let pat_ty = match expected_ty.sty {
+ ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
+ let min_len = before.len() + after.len();
+ match *slice {
+ Some(_) => cmp::max(min_len, size),
+ None => min_len
+ }
+ }),
_ => {
- let inner_ty = fcx.infcx().next_ty_var();
- let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
- let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
- demand::eqtype(fcx, pat.span, expected, rptr_ty);
- (rptr_ty, inner_ty)
+ let region = self.next_region_var(infer::PatternRegion(pat.span));
+ tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
+ ty: tcx.mk_slice(inner_ty),
+ mutbl: expected_ty.builtin_deref(true, ty::NoPreference)
+ .map_or(hir::MutImmutable, |mt| mt.mutbl)
+ })
}
};
- fcx.write_ty(pat.id, rptr_ty);
- check_pat(pcx, &inner, inner_ty);
- } else {
- fcx.write_error(pat.id);
- check_pat(pcx, &inner, tcx.types.err);
- }
- }
- PatKind::Vec(ref before, ref slice, ref after) => {
- let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
- let inner_ty = fcx.infcx().next_ty_var();
- let pat_ty = match expected_ty.sty {
- ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
- let min_len = before.len() + after.len();
- match *slice {
- Some(_) => cmp::max(min_len, size),
- None => min_len
- }
- }),
- _ => {
- let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
- ty: tcx.mk_slice(inner_ty),
- mutbl: expected_ty.builtin_deref(true, ty::NoPreference).map(|mt| mt.mutbl)
- .unwrap_or(hir::MutImmutable)
- })
- }
- };
+ self.write_ty(pat.id, pat_ty);
- fcx.write_ty(pat.id, pat_ty);
-
- // `demand::subtype` would be good enough, but using
- // `eqtype` turns out to be equally general. See (*)
- // below for details.
- demand::eqtype(fcx, pat.span, expected, pat_ty);
+ // `demand::subtype` would be good enough, but using
+ // `eqtype` turns out to be equally general. See (*)
+ // below for details.
+ self.demand_eqtype(pat.span, expected, pat_ty);
- for elt in before {
- check_pat(pcx, &elt, inner_ty);
- }
- if let Some(ref slice) = *slice {
- let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let mutbl = expected_ty.builtin_deref(true, ty::NoPreference)
- .map_or(hir::MutImmutable, |mt| mt.mutbl);
+ for elt in before {
+ self.check_pat(&elt, inner_ty);
+ }
+ if let Some(ref slice) = *slice {
+ let region = self.next_region_var(infer::PatternRegion(pat.span));
+ let mutbl = expected_ty.builtin_deref(true, ty::NoPreference)
+ .map_or(hir::MutImmutable, |mt| mt.mutbl);
- let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
- ty: tcx.mk_slice(inner_ty),
- mutbl: mutbl
- });
- check_pat(pcx, &slice, slice_ty);
- }
- for elt in after {
- check_pat(pcx, &elt, inner_ty);
+ let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
+ ty: tcx.mk_slice(inner_ty),
+ mutbl: mutbl
+ });
+ self.check_pat(&slice, slice_ty);
+ }
+ for elt in after {
+ self.check_pat(&elt, inner_ty);
+ }
}
}
- }
-
- // (*) In most of the cases above (literals and constants being
- // the exception), we relate types using strict equality, evewn
- // though subtyping would be sufficient. There are a few reasons
- // for this, some of which are fairly subtle and which cost me
- // (nmatsakis) an hour or two debugging to remember, so I thought
- // I'd write them down this time.
- //
- // 1. There is no loss of expressiveness here, though it does
- // cause some inconvenience. What we are saying is that the type
- // of `x` becomes *exactly* what is expected. This can cause unnecessary
- // errors in some cases, such as this one:
- // it will cause errors in a case like this:
- //
- // ```
- // fn foo<'x>(x: &'x int) {
- // let a = 1;
- // let mut z = x;
- // z = &a;
- // }
- // ```
- //
- // The reason we might get an error is that `z` might be
- // assigned a type like `&'x int`, and then we would have
- // a problem when we try to assign `&a` to `z`, because
- // the lifetime of `&a` (i.e., the enclosing block) is
- // shorter than `'x`.
- //
- // HOWEVER, this code works fine. The reason is that the
- // expected type here is whatever type the user wrote, not
- // the initializer's type. In this case the user wrote
- // nothing, so we are going to create a type variable `Z`.
- // Then we will assign the type of the initializer (`&'x
- // int`) as a subtype of `Z`: `&'x int <: Z`. And hence we
- // will instantiate `Z` as a type `&'0 int` where `'0` is
- // a fresh region variable, with the constraint that `'x :
- // '0`. So basically we're all set.
- //
- // Note that there are two tests to check that this remains true
- // (`regions-reassign-{match,let}-bound-pointer.rs`).
- //
- // 2. Things go horribly wrong if we use subtype. The reason for
- // THIS is a fairly subtle case involving bound regions. See the
- // `givens` field in `region_inference`, as well as the test
- // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
- // for details. Short version is that we must sometimes detect
- // relationships between specific region variables and regions
- // bound in a closure signature, and that detection gets thrown
- // off when we substitute fresh region variables here to enable
- // subtyping.
-}
-fn check_assoc_item_is_const(pcx: &pat_ctxt, def: Def, span: Span) -> bool {
- match def {
- Def::AssociatedConst(..) => true,
- Def::Method(..) => {
- span_err!(pcx.fcx.ccx.tcx.sess, span, E0327,
- "associated items in match patterns must be constants");
- false
- }
- _ => {
- span_bug!(span, "non-associated item in check_assoc_item_is_const");
- }
+ // (*) In most of the cases above (literals and constants being
+        // the exception), we relate types using strict equality, even
+ // though subtyping would be sufficient. There are a few reasons
+ // for this, some of which are fairly subtle and which cost me
+ // (nmatsakis) an hour or two debugging to remember, so I thought
+ // I'd write them down this time.
+ //
+ // 1. There is no loss of expressiveness here, though it does
+ // cause some inconvenience. What we are saying is that the type
+ // of `x` becomes *exactly* what is expected. This can cause unnecessary
+        // errors in some cases, such as this one:
+ //
+ // ```
+ // fn foo<'x>(x: &'x int) {
+ // let a = 1;
+ // let mut z = x;
+ // z = &a;
+ // }
+ // ```
+ //
+ // The reason we might get an error is that `z` might be
+ // assigned a type like `&'x int`, and then we would have
+ // a problem when we try to assign `&a` to `z`, because
+ // the lifetime of `&a` (i.e., the enclosing block) is
+ // shorter than `'x`.
+ //
+ // HOWEVER, this code works fine. The reason is that the
+ // expected type here is whatever type the user wrote, not
+ // the initializer's type. In this case the user wrote
+ // nothing, so we are going to create a type variable `Z`.
+ // Then we will assign the type of the initializer (`&'x
+ // int`) as a subtype of `Z`: `&'x int <: Z`. And hence we
+ // will instantiate `Z` as a type `&'0 int` where `'0` is
+ // a fresh region variable, with the constraint that `'x :
+ // '0`. So basically we're all set.
+ //
+ // Note that there are two tests to check that this remains true
+ // (`regions-reassign-{match,let}-bound-pointer.rs`).
+ //
+ // 2. Things go horribly wrong if we use subtype. The reason for
+ // THIS is a fairly subtle case involving bound regions. See the
+ // `givens` field in `region_inference`, as well as the test
+ // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
+ // for details. Short version is that we must sometimes detect
+ // relationships between specific region variables and regions
+ // bound in a closure signature, and that detection gets thrown
+ // off when we substitute fresh region variables here to enable
+ // subtyping.
}
-}
-pub fn check_dereferencable<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
- span: Span, expected: Ty<'tcx>,
- inner: &hir::Pat) -> bool {
- let fcx = pcx.fcx;
- let tcx = pcx.fcx.ccx.tcx;
- if pat_is_binding(&tcx.def_map.borrow(), inner) {
- let expected = fcx.infcx().shallow_resolve(expected);
- expected.builtin_deref(true, ty::NoPreference).map_or(true, |mt| match mt.ty.sty {
- ty::TyTrait(_) => {
- // This is "x = SomeTrait" being reduced from
- // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
- span_err!(tcx.sess, span, E0033,
- "type `{}` cannot be dereferenced",
- fcx.infcx().ty_to_string(expected));
+ fn check_assoc_item_is_const(&self, def: Def, span: Span) -> bool {
+ match def {
+ Def::AssociatedConst(..) => true,
+ Def::Method(..) => {
+ span_err!(self.tcx.sess, span, E0327,
+ "associated items in match patterns must be constants");
false
}
- _ => true
- })
- } else {
- true
- }
-}
-
-pub fn check_match<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- discrim: &'tcx hir::Expr,
- arms: &'tcx [hir::Arm],
- expected: Expectation<'tcx>,
- match_src: hir::MatchSource) {
- let tcx = fcx.ccx.tcx;
-
- // Not entirely obvious: if matches may create ref bindings, we
- // want to use the *precise* type of the discriminant, *not* some
- // supertype, as the "discriminant type" (issue #23116).
- let contains_ref_bindings = arms.iter()
- .filter_map(|a| tcx.arm_contains_ref_binding(a))
- .max_by_key(|m| match *m {
- hir::MutMutable => 1,
- hir::MutImmutable => 0,
- });
- let discrim_ty;
- if let Some(m) = contains_ref_bindings {
- check_expr_with_lvalue_pref(fcx, discrim, LvaluePreference::from_mutbl(m));
- discrim_ty = fcx.expr_ty(discrim);
- } else {
- // ...but otherwise we want to use any supertype of the
- // discriminant. This is sort of a workaround, see note (*) in
- // `check_pat` for some details.
- discrim_ty = fcx.infcx().next_ty_var();
- check_expr_has_type(fcx, discrim, discrim_ty);
- };
-
- // Typecheck the patterns first, so that we get types for all the
- // bindings.
- for arm in arms {
- let mut pcx = pat_ctxt {
- fcx: fcx,
- map: pat_id_map(&tcx.def_map, &arm.pats[0]),
- };
- for p in &arm.pats {
- check_pat(&mut pcx, &p, discrim_ty);
+ _ => {
+ span_bug!(span, "non-associated item in check_assoc_item_is_const");
+ }
}
}
- // Now typecheck the blocks.
- //
- // The result of the match is the common supertype of all the
- // arms. Start out the value as bottom, since it's the, well,
- // bottom the type lattice, and we'll be moving up the lattice as
- // we process each arm. (Note that any match with 0 arms is matching
- // on any empty type and is therefore unreachable; should the flow
- // of execution reach it, we will panic, so bottom is an appropriate
- // type in that case)
- let expected = expected.adjust_for_branches(fcx);
- let mut result_ty = fcx.infcx().next_diverging_ty_var();
- let coerce_first = match expected {
- // We don't coerce to `()` so that if the match expression is a
- // statement it's branches can have any consistent type. That allows
- // us to give better error messages (pointing to a usually better
- // arm for inconsistent arms or to the whole match when a `()` type
- // is required).
- Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
- ety
- }
- _ => result_ty
- };
- for (i, arm) in arms.iter().enumerate() {
- if let Some(ref e) = arm.guard {
- check_expr_has_type(fcx, e, tcx.types.bool);
- }
- check_expr_with_expectation(fcx, &arm.body, expected);
- let arm_ty = fcx.expr_ty(&arm.body);
-
- if result_ty.references_error() || arm_ty.references_error() {
- result_ty = tcx.types.err;
- continue;
+ pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool {
+ let tcx = self.tcx;
+ if pat_is_binding(&tcx.def_map.borrow(), inner) {
+ let expected = self.shallow_resolve(expected);
+ expected.builtin_deref(true, ty::NoPreference).map_or(true, |mt| match mt.ty.sty {
+ ty::TyTrait(_) => {
+ // This is "x = SomeTrait" being reduced from
+ // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
+ span_err!(tcx.sess, span, E0033,
+ "type `{}` cannot be dereferenced",
+ self.ty_to_string(expected));
+ false
+ }
+ _ => true
+ })
+ } else {
+ true
}
+ }
+}
- // Handle the fallback arm of a desugared if-let like a missing else.
- let is_if_let_fallback = match match_src {
- hir::MatchSource::IfLetDesugar { contains_else_clause: false } => {
- i == arms.len() - 1 && arm_ty.is_nil()
- }
- _ => false
- };
-
- let origin = if is_if_let_fallback {
- TypeOrigin::IfExpressionWithNoElse(expr.span)
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn check_match(&self,
+ expr: &'gcx hir::Expr,
+ discrim: &'gcx hir::Expr,
+ arms: &'gcx [hir::Arm],
+ expected: Expectation<'tcx>,
+ match_src: hir::MatchSource) {
+ let tcx = self.tcx;
+
+ // Not entirely obvious: if matches may create ref bindings, we
+ // want to use the *precise* type of the discriminant, *not* some
+ // supertype, as the "discriminant type" (issue #23116).
+ let contains_ref_bindings = arms.iter()
+ .filter_map(|a| tcx.arm_contains_ref_binding(a))
+ .max_by_key(|m| match *m {
+ hir::MutMutable => 1,
+ hir::MutImmutable => 0,
+ });
+ let discrim_ty;
+ if let Some(m) = contains_ref_bindings {
+ self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m));
+ discrim_ty = self.expr_ty(discrim);
} else {
- TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src)
+ // ...but otherwise we want to use any supertype of the
+ // discriminant. This is sort of a workaround, see note (*) in
+ // `check_pat` for some details.
+ discrim_ty = self.next_ty_var();
+ self.check_expr_has_type(discrim, discrim_ty);
};
- let result = if is_if_let_fallback {
- fcx.infcx().eq_types(true, origin, arm_ty, result_ty)
- .map(|InferOk { obligations, .. }| {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- arm_ty
- })
- } else if i == 0 {
- // Special-case the first arm, as it has no "previous expressions".
- coercion::try(fcx, &arm.body, coerce_first)
- } else {
- let prev_arms = || arms[..i].iter().map(|arm| &*arm.body);
- coercion::try_find_lub(fcx, origin, prev_arms, result_ty, &arm.body)
- };
+ // Typecheck the patterns first, so that we get types for all the
+ // bindings.
+ for arm in arms {
+ let pcx = PatCtxt {
+ fcx: self,
+ map: pat_id_map(&tcx.def_map, &arm.pats[0]),
+ };
+ for p in &arm.pats {
+ pcx.check_pat(&p, discrim_ty);
+ }
+ }
- result_ty = match result {
- Ok(ty) => ty,
- Err(e) => {
- let (expected, found) = if is_if_let_fallback {
- (arm_ty, result_ty)
- } else {
- (result_ty, arm_ty)
- };
- fcx.infcx().report_mismatched_types(origin, expected, found, e);
- fcx.tcx().types.err
+ // Now typecheck the blocks.
+ //
+ // The result of the match is the common supertype of all the
+ // arms. Start out the value as bottom, since it's the, well,
+ // bottom of the type lattice, and we'll be moving up the lattice as
+ // we process each arm. (Note that any match with 0 arms is matching
+ // on any empty type and is therefore unreachable; should the flow
+ // of execution reach it, we will panic, so bottom is an appropriate
+ // type in that case)
+ let expected = expected.adjust_for_branches(self);
+ let mut result_ty = self.next_diverging_ty_var();
+ let coerce_first = match expected {
+ // We don't coerce to `()` so that if the match expression is a
+ // statement its branches can have any consistent type. That allows
+ // us to give better error messages (pointing to a usually better
+ // arm for inconsistent arms or to the whole match when a `()` type
+ // is required).
+ Expectation::ExpectHasType(ety) if ety != self.tcx.mk_nil() => {
+ ety
}
+ _ => result_ty
};
- }
+ for (i, arm) in arms.iter().enumerate() {
+ if let Some(ref e) = arm.guard {
+ self.check_expr_has_type(e, tcx.types.bool);
+ }
+ self.check_expr_with_expectation(&arm.body, expected);
+ let arm_ty = self.expr_ty(&arm.body);
- fcx.write_ty(expr.id, result_ty);
-}
+ if result_ty.references_error() || arm_ty.references_error() {
+ result_ty = tcx.types.err;
+ continue;
+ }
-pub struct pat_ctxt<'a, 'tcx: 'a> {
- pub fcx: &'a FnCtxt<'a, 'tcx>,
- pub map: PatIdMap,
-}
+ // Handle the fallback arm of a desugared if-let like a missing else.
+ let is_if_let_fallback = match match_src {
+ hir::MatchSource::IfLetDesugar { contains_else_clause: false } => {
+ i == arms.len() - 1 && arm_ty.is_nil()
+ }
+ _ => false
+ };
-pub fn check_pat_struct<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, pat: &'tcx hir::Pat,
- path: &hir::Path, fields: &'tcx [Spanned<hir::FieldPat>],
- etc: bool, expected: Ty<'tcx>) {
- let fcx = pcx.fcx;
- let tcx = pcx.fcx.ccx.tcx;
+ let origin = if is_if_let_fallback {
+ TypeOrigin::IfExpressionWithNoElse(expr.span)
+ } else {
+ TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src)
+ };
- let def = tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
- let variant = match fcx.def_struct_variant(def, path.span) {
- Some((_, variant)) => variant,
- None => {
- let name = pprust::path_to_string(path);
- span_err!(tcx.sess, pat.span, E0163,
- "`{}` does not name a struct or a struct variant", name);
- fcx.write_error(pat.id);
+ let result = if is_if_let_fallback {
+ self.eq_types(true, origin, arm_ty, result_ty)
+ .map(|InferOk { obligations, .. }| {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ arm_ty
+ })
+ } else if i == 0 {
+ // Special-case the first arm, as it has no "previous expressions".
+ self.try_coerce(&arm.body, coerce_first)
+ } else {
+ let prev_arms = || arms[..i].iter().map(|arm| &*arm.body);
+ self.try_find_coercion_lub(origin, prev_arms, result_ty, &arm.body)
+ };
- for field in fields {
- check_pat(pcx, &field.node.pat, tcx.types.err);
- }
- return;
+ result_ty = match result {
+ Ok(ty) => ty,
+ Err(e) => {
+ let (expected, found) = if is_if_let_fallback {
+ (arm_ty, result_ty)
+ } else {
+ (result_ty, arm_ty)
+ };
+ self.report_mismatched_types(origin, expected, found, e);
+ self.tcx.types.err
+ }
+ };
}
- };
-
- let pat_ty = pcx.fcx.instantiate_type(def.def_id(), path);
- let item_substs = match pat_ty.sty {
- ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs,
- _ => span_bug!(pat.span, "struct variant is not an ADT")
- };
- demand::eqtype(fcx, pat.span, expected, pat_ty);
- check_struct_pat_fields(pcx, pat.span, fields, variant, &item_substs, etc);
-
- fcx.write_ty(pat.id, pat_ty);
- fcx.write_substs(pat.id, ty::ItemSubsts { substs: item_substs.clone() });
-}
-// This function exists due to the warning "diagnostic code E0164 already used"
-fn bad_struct_kind_err(sess: &Session, pat: &hir::Pat, path: &hir::Path, lint: bool) {
- let name = pprust::path_to_string(path);
- let msg = format!("`{}` does not name a tuple variant or a tuple struct", name);
- if lint {
- sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
- pat.id,
- pat.span,
- msg);
- } else {
- span_err!(sess, pat.span, E0164, "{}", msg);
+ self.write_ty(expr.id, result_ty);
}
}
-fn check_pat_enum<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
- pat: &hir::Pat,
- path: &hir::Path,
- subpats: Option<&'tcx [P<hir::Pat>]>,
- expected: Ty<'tcx>,
- is_tuple_struct_pat: bool)
-{
- // Typecheck the path.
- let fcx = pcx.fcx;
- let tcx = pcx.fcx.ccx.tcx;
-
- let path_res = match tcx.def_map.borrow().get(&pat.id) {
- Some(&path_res) if path_res.base_def != Def::Err => path_res,
- _ => {
- fcx.infcx().set_tainted_by_errors();
- fcx.write_error(pat.id);
-
- if let Some(subpats) = subpats {
- for pat in subpats {
- check_pat(pcx, &pat, tcx.types.err);
+impl<'a, 'gcx, 'tcx> PatCtxt<'a, 'gcx, 'tcx> {
+ pub fn check_pat_struct(&self, pat: &'gcx hir::Pat,
+ path: &hir::Path, fields: &'gcx [Spanned<hir::FieldPat>],
+ etc: bool, expected: Ty<'tcx>) {
+ let tcx = self.tcx;
+
+ let def = tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
+ let variant = match self.def_struct_variant(def, path.span) {
+ Some((_, variant)) => variant,
+ None => {
+ let name = pprust::path_to_string(path);
+ span_err!(tcx.sess, pat.span, E0163,
+ "`{}` does not name a struct or a struct variant", name);
+ self.write_error(pat.id);
+
+ for field in fields {
+ self.check_pat(&field.node.pat, tcx.types.err);
}
+ return;
}
+ };
- return;
- }
- };
-
- let (opt_ty, segments, def) = match resolve_ty_and_def_ufcs(fcx, path_res,
- None, path,
- pat.span, pat.id) {
- Some(resolution) => resolution,
- // Error handling done inside resolve_ty_and_def_ufcs, so if
- // resolution fails just return.
- None => {return;}
- };
-
- // Items that were partially resolved before should have been resolved to
- // associated constants (i.e. not methods).
- if path_res.depth != 0 && !check_assoc_item_is_const(pcx, def, pat.span) {
- fcx.write_error(pat.id);
- return;
+ let pat_ty = self.instantiate_type(def.def_id(), path);
+ let item_substs = match pat_ty.sty {
+ ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs,
+ _ => span_bug!(pat.span, "struct variant is not an ADT")
+ };
+ self.demand_eqtype(pat.span, expected, pat_ty);
+ self.check_struct_pat_fields(pat.span, fields, variant, &item_substs, etc);
+
+ self.write_ty(pat.id, pat_ty);
+ self.write_substs(pat.id, ty::ItemSubsts {
+ substs: item_substs
+ });
}
- let enum_def = def.variant_def_ids()
- .map_or_else(|| def.def_id(), |(enum_def, _)| enum_def);
+ fn check_pat_enum(&self,
+ pat: &hir::Pat,
+ path: &hir::Path,
+ subpats: Option<&'gcx [P<hir::Pat>]>,
+ expected: Ty<'tcx>,
+ is_tuple_struct_pat: bool)
+ {
+ // Typecheck the path.
+ let tcx = self.tcx;
+
+ let path_res = match tcx.def_map.borrow().get(&pat.id) {
+ Some(&path_res) if path_res.base_def != Def::Err => path_res,
+ _ => {
+ self.set_tainted_by_errors();
+ self.write_error(pat.id);
+
+ if let Some(subpats) = subpats {
+ for pat in subpats {
+ self.check_pat(&pat, tcx.types.err);
+ }
+ }
- let ctor_scheme = tcx.lookup_item_type(enum_def);
- let ctor_predicates = tcx.lookup_predicates(enum_def);
- let path_scheme = if ctor_scheme.ty.is_fn() {
- let fn_ret = tcx.no_late_bound_regions(&ctor_scheme.ty.fn_ret()).unwrap();
- ty::TypeScheme {
- ty: fn_ret.unwrap(),
- generics: ctor_scheme.generics,
- }
- } else {
- ctor_scheme
- };
- instantiate_path(pcx.fcx, segments,
- path_scheme, &ctor_predicates,
- opt_ty, def, pat.span, pat.id);
-
- let report_bad_struct_kind = |is_warning| {
- bad_struct_kind_err(tcx.sess, pat, path, is_warning);
- if is_warning { return; }
- fcx.write_error(pat.id);
- if let Some(subpats) = subpats {
- for pat in subpats {
- check_pat(pcx, &pat, tcx.types.err);
+ return;
}
- }
- };
-
- // If we didn't have a fully resolved path to start with, we had an
- // associated const, and we should quit now, since the rest of this
- // function uses checks specific to structs and enums.
- if path_res.depth != 0 {
- if is_tuple_struct_pat {
- report_bad_struct_kind(false);
- } else {
- let pat_ty = fcx.node_ty(pat.id);
- demand::suptype(fcx, pat.span, expected, pat_ty);
- }
- return;
- }
+ };
- let pat_ty = fcx.node_ty(pat.id);
- demand::eqtype(fcx, pat.span, expected, pat_ty);
+ let (opt_ty, segments, def) = match self.resolve_ty_and_def_ufcs(path_res,
+ None, path,
+ pat.span, pat.id) {
+ Some(resolution) => resolution,
+ // Error handling done inside resolve_ty_and_def_ufcs, so if
+ // resolution fails just return.
+ None => {return;}
+ };
- let real_path_ty = fcx.node_ty(pat.id);
- let (kind_name, variant, expected_substs) = match real_path_ty.sty {
- ty::TyEnum(enum_def, expected_substs) => {
- let variant = enum_def.variant_of_def(def);
- ("variant", variant, expected_substs)
- }
- ty::TyStruct(struct_def, expected_substs) => {
- let variant = struct_def.struct_variant();
- ("struct", variant, expected_substs)
- }
- _ => {
- report_bad_struct_kind(false);
+ // Items that were partially resolved before should have been resolved to
+ // associated constants (i.e. not methods).
+ if path_res.depth != 0 && !self.check_assoc_item_is_const(def, pat.span) {
+ self.write_error(pat.id);
return;
}
- };
- match (is_tuple_struct_pat, variant.kind()) {
- (true, ty::VariantKind::Unit) => {
- // Matching unit structs with tuple variant patterns (`UnitVariant(..)`)
- // is allowed for backward compatibility.
- report_bad_struct_kind(true);
- }
- (_, ty::VariantKind::Struct) => {
- report_bad_struct_kind(false);
- return
- }
- _ => {}
- }
+ let enum_def = def.variant_def_ids()
+ .map_or_else(|| def.def_id(), |(enum_def, _)| enum_def);
- if let Some(subpats) = subpats {
- if subpats.len() == variant.fields.len() {
- for (subpat, field) in subpats.iter().zip(&variant.fields) {
- let field_ty = fcx.field_ty(subpat.span, field, expected_substs);
- check_pat(pcx, &subpat, field_ty);
+ let ctor_scheme = tcx.lookup_item_type(enum_def);
+ let ctor_predicates = tcx.lookup_predicates(enum_def);
+ let path_scheme = if ctor_scheme.ty.is_fn() {
+ let fn_ret = tcx.no_late_bound_regions(&ctor_scheme.ty.fn_ret()).unwrap();
+ ty::TypeScheme {
+ ty: fn_ret.unwrap(),
+ generics: ctor_scheme.generics,
}
- } else if variant.fields.is_empty() {
- span_err!(tcx.sess, pat.span, E0024,
- "this pattern has {} field{}, but the corresponding {} has no fields",
- subpats.len(), if subpats.len() == 1 {""} else {"s"}, kind_name);
+ } else {
+ ctor_scheme
+ };
+ self.instantiate_path(segments, path_scheme, &ctor_predicates,
+ opt_ty, def, pat.span, pat.id);
- for pat in subpats {
- check_pat(pcx, &pat, tcx.types.err);
+ let report_bad_struct_kind = |is_warning| {
+ bad_struct_kind_err(tcx.sess, pat, path, is_warning);
+ if is_warning { return; }
+ self.write_error(pat.id);
+ if let Some(subpats) = subpats {
+ for pat in subpats {
+ self.check_pat(&pat, tcx.types.err);
+ }
}
- } else {
- span_err!(tcx.sess, pat.span, E0023,
- "this pattern has {} field{}, but the corresponding {} has {} field{}",
- subpats.len(), if subpats.len() == 1 {""} else {"s"},
- kind_name,
- variant.fields.len(), if variant.fields.len() == 1 {""} else {"s"});
+ };
- for pat in subpats {
- check_pat(pcx, &pat, tcx.types.err);
+ // If we didn't have a fully resolved path to start with, we had an
+ // associated const, and we should quit now, since the rest of this
+ // function uses checks specific to structs and enums.
+ if path_res.depth != 0 {
+ if is_tuple_struct_pat {
+ report_bad_struct_kind(false);
+ } else {
+ let pat_ty = self.node_ty(pat.id);
+ self.demand_suptype(pat.span, expected, pat_ty);
}
+ return;
}
- }
-}
-/// `path` is the AST path item naming the type of this struct.
-/// `fields` is the field patterns of the struct pattern.
-/// `struct_fields` describes the type of each field of the struct.
-/// `struct_id` is the ID of the struct.
-/// `etc` is true if the pattern said '...' and false otherwise.
-pub fn check_struct_pat_fields<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
- span: Span,
- fields: &'tcx [Spanned<hir::FieldPat>],
- variant: ty::VariantDef<'tcx>,
- substs: &Substs<'tcx>,
- etc: bool) {
- let tcx = pcx.fcx.ccx.tcx;
-
- // Index the struct fields' types.
- let field_map = variant.fields
- .iter()
- .map(|field| (field.name, field))
- .collect::<FnvHashMap<_, _>>();
-
- // Keep track of which fields have already appeared in the pattern.
- let mut used_fields = FnvHashMap();
-
- // Typecheck each field.
- for &Spanned { node: ref field, span } in fields {
- let field_ty = match used_fields.entry(field.name) {
- Occupied(occupied) => {
- let mut err = struct_span_err!(tcx.sess, span, E0025,
- "field `{}` bound multiple times in the pattern",
- field.name);
- span_note!(&mut err, *occupied.get(),
- "field `{}` previously bound here",
- field.name);
- err.emit();
- tcx.types.err
- }
- Vacant(vacant) => {
- vacant.insert(span);
- field_map.get(&field.name)
- .map(|f| pcx.fcx.field_ty(span, f, substs))
- .unwrap_or_else(|| {
- span_err!(tcx.sess, span, E0026,
- "struct `{}` does not have a field named `{}`",
- tcx.item_path_str(variant.did),
- field.name);
- tcx.types.err
- })
+ let pat_ty = self.node_ty(pat.id);
+ self.demand_eqtype(pat.span, expected, pat_ty);
+
+ let real_path_ty = self.node_ty(pat.id);
+ let (kind_name, variant, expected_substs) = match real_path_ty.sty {
+ ty::TyEnum(enum_def, expected_substs) => {
+ let variant = enum_def.variant_of_def(def);
+ ("variant", variant, expected_substs)
+ }
+ ty::TyStruct(struct_def, expected_substs) => {
+ let variant = struct_def.struct_variant();
+ ("struct", variant, expected_substs)
+ }
+ _ => {
+ report_bad_struct_kind(false);
+ return;
}
};
- check_pat(pcx, &field.pat, field_ty);
+ match (is_tuple_struct_pat, variant.kind()) {
+ (true, ty::VariantKind::Unit) => {
+ // Matching unit structs with tuple variant patterns (`UnitVariant(..)`)
+ // is allowed for backward compatibility.
+ report_bad_struct_kind(true);
+ }
+ (_, ty::VariantKind::Struct) => {
+ report_bad_struct_kind(false);
+ return
+ }
+ _ => {}
+ }
+
+ if let Some(subpats) = subpats {
+ if subpats.len() == variant.fields.len() {
+ for (subpat, field) in subpats.iter().zip(&variant.fields) {
+ let field_ty = self.field_ty(subpat.span, field, expected_substs);
+ self.check_pat(&subpat, field_ty);
+ }
+ } else if variant.fields.is_empty() {
+ span_err!(tcx.sess, pat.span, E0024,
+ "this pattern has {} field{}, but the corresponding {} has no fields",
+ subpats.len(), if subpats.len() == 1 {""} else {"s"}, kind_name);
+
+ for pat in subpats {
+ self.check_pat(&pat, tcx.types.err);
+ }
+ } else {
+ span_err!(tcx.sess, pat.span, E0023,
+ "this pattern has {} field{}, but the corresponding {} has {} field{}",
+ subpats.len(), if subpats.len() == 1 {""} else {"s"},
+ kind_name,
+ variant.fields.len(), if variant.fields.len() == 1 {""} else {"s"});
+
+ for pat in subpats {
+ self.check_pat(&pat, tcx.types.err);
+ }
+ }
+ }
}
- // Report an error if not all the fields were specified.
- if !etc {
- for field in variant.fields
+ /// `path` is the AST path item naming the type of this struct.
+ /// `fields` is the field patterns of the struct pattern.
+ /// `struct_fields` describes the type of each field of the struct.
+ /// `struct_id` is the ID of the struct.
+ /// `etc` is true if the pattern said '...' and false otherwise.
+ pub fn check_struct_pat_fields(&self,
+ span: Span,
+ fields: &'gcx [Spanned<hir::FieldPat>],
+ variant: ty::VariantDef<'tcx>,
+ substs: &Substs<'tcx>,
+ etc: bool) {
+ let tcx = self.tcx;
+
+ // Index the struct fields' types.
+ let field_map = variant.fields
.iter()
- .filter(|field| !used_fields.contains_key(&field.name)) {
- span_err!(tcx.sess, span, E0027,
- "pattern does not mention field `{}`",
- field.name);
+ .map(|field| (field.name, field))
+ .collect::<FnvHashMap<_, _>>();
+
+ // Keep track of which fields have already appeared in the pattern.
+ let mut used_fields = FnvHashMap();
+
+ // Typecheck each field.
+ for &Spanned { node: ref field, span } in fields {
+ let field_ty = match used_fields.entry(field.name) {
+ Occupied(occupied) => {
+ let mut err = struct_span_err!(tcx.sess, span, E0025,
+ "field `{}` bound multiple times \
+ in the pattern",
+ field.name);
+ span_note!(&mut err, *occupied.get(),
+ "field `{}` previously bound here",
+ field.name);
+ err.emit();
+ tcx.types.err
+ }
+ Vacant(vacant) => {
+ vacant.insert(span);
+ field_map.get(&field.name)
+ .map(|f| self.field_ty(span, f, substs))
+ .unwrap_or_else(|| {
+ span_err!(tcx.sess, span, E0026,
+ "struct `{}` does not have a field named `{}`",
+ tcx.item_path_str(variant.did),
+ field.name);
+ tcx.types.err
+ })
+ }
+ };
+
+ self.check_pat(&field.pat, field_ty);
+ }
+
+ // Report an error if not all the fields were specified.
+ if !etc {
+ for field in variant.fields
+ .iter()
+ .filter(|field| !used_fields.contains_key(&field.name)) {
+ span_err!(tcx.sess, span, E0027,
+ "pattern does not mention field `{}`",
+ field.name);
+ }
}
}
}
use syntax::codemap::Span;
//FIXME(@jroesch): Ideally we should be able to drop the fulfillment_cx argument.
-pub fn normalize_associated_types_in<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
- fulfillment_cx: &mut FulfillmentContext<'tcx>,
- span: Span,
- body_id: ast::NodeId,
- value: &T)
- -> T
+pub fn normalize_associated_types_in<'a, 'gcx, 'tcx, T>(
+ infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ fulfillment_cx: &mut FulfillmentContext<'tcx>,
+ span: Span,
+ body_id: ast::NodeId,
+ value: &T) -> T
+
where T : TypeFoldable<'tcx>
{
debug!("normalize_associated_types_in(value={:?})", value);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::autoderef;
-use super::check_argument_types;
-use super::check_expr;
-use super::check_method_argument_types;
-use super::demand;
-use super::DeferredCallResolution;
-use super::err_args;
-use super::Expectation;
-use super::expected_types_for_fn_args;
-use super::FnCtxt;
-use super::method;
-use super::structurally_resolved_type;
-use super::TupleArgumentsFlag;
-use super::UnresolvedTypeAction;
-use super::write_call;
+use super::{DeferredCallResolution, Expectation, FnCtxt,
+ TupleArgumentsFlag, UnresolvedTypeAction};
use CrateCtxt;
use middle::cstore::LOCAL_CRATE;
}
}
-pub fn check_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &'tcx hir::Expr,
- callee_expr: &'tcx hir::Expr,
- arg_exprs: &'tcx [P<hir::Expr>],
- expected: Expectation<'tcx>)
-{
- check_expr(fcx, callee_expr);
- let original_callee_ty = fcx.expr_ty(callee_expr);
- let (callee_ty, _, result) =
- autoderef(fcx,
- callee_expr.span,
- original_callee_ty,
- || Some(callee_expr),
- UnresolvedTypeAction::Error,
- LvaluePreference::NoPreference,
- |adj_ty, idx| {
- try_overloaded_call_step(fcx, call_expr, callee_expr, adj_ty, idx)
- });
-
- match result {
- None => {
- // this will report an error since original_callee_ty is not a fn
- confirm_builtin_call(fcx, call_expr, original_callee_ty, arg_exprs, expected);
- }
-
- Some(CallStep::Builtin) => {
- confirm_builtin_call(fcx, call_expr, callee_ty, arg_exprs, expected);
- }
-
- Some(CallStep::DeferredClosure(fn_sig)) => {
- confirm_deferred_closure_call(fcx, call_expr, arg_exprs, expected, fn_sig);
- }
-
- Some(CallStep::Overloaded(method_callee)) => {
- confirm_overloaded_call(fcx, call_expr, callee_expr,
- arg_exprs, expected, method_callee);
- }
- }
-}
-
enum CallStep<'tcx> {
Builtin,
DeferredClosure(ty::FnSig<'tcx>),
Overloaded(ty::MethodCallee<'tcx>)
}
-fn try_overloaded_call_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &'tcx hir::Expr,
- callee_expr: &'tcx hir::Expr,
- adjusted_ty: Ty<'tcx>,
- autoderefs: usize)
- -> Option<CallStep<'tcx>>
-{
- debug!("try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?}, autoderefs={})",
- call_expr,
- adjusted_ty,
- autoderefs);
-
- // If the callee is a bare function or a closure, then we're all set.
- match structurally_resolved_type(fcx, callee_expr.span, adjusted_ty).sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
- fcx.write_autoderef_adjustment(callee_expr.id, autoderefs);
- return Some(CallStep::Builtin);
- }
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn check_call(&self,
+ call_expr: &'gcx hir::Expr,
+ callee_expr: &'gcx hir::Expr,
+ arg_exprs: &'gcx [P<hir::Expr>],
+ expected: Expectation<'tcx>)
+ {
+ self.check_expr(callee_expr);
+ let original_callee_ty = self.expr_ty(callee_expr);
+ let (callee_ty, _, result) =
+ self.autoderef(callee_expr.span,
+ original_callee_ty,
+ || Some(callee_expr),
+ UnresolvedTypeAction::Error,
+ LvaluePreference::NoPreference,
+ |adj_ty, idx| {
+ self.try_overloaded_call_step(call_expr, callee_expr, adj_ty, idx)
+ });
+
+ match result {
+ None => {
+ // this will report an error since original_callee_ty is not a fn
+ self.confirm_builtin_call(call_expr, original_callee_ty, arg_exprs, expected);
+ }
- ty::TyClosure(def_id, ref substs) => {
- assert_eq!(def_id.krate, LOCAL_CRATE);
-
- // Check whether this is a call to a closure where we
- // haven't yet decided on whether the closure is fn vs
- // fnmut vs fnonce. If so, we have to defer further processing.
- if fcx.infcx().closure_kind(def_id).is_none() {
- let closure_ty =
- fcx.infcx().closure_type(def_id, substs);
- let fn_sig =
- fcx.infcx().replace_late_bound_regions_with_fresh_var(call_expr.span,
- infer::FnCall,
- &closure_ty.sig).0;
- fcx.record_deferred_call_resolution(def_id, Box::new(CallResolution {
- call_expr: call_expr,
- callee_expr: callee_expr,
- adjusted_ty: adjusted_ty,
- autoderefs: autoderefs,
- fn_sig: fn_sig.clone(),
- closure_def_id: def_id
- }));
- return Some(CallStep::DeferredClosure(fn_sig));
+ Some(CallStep::Builtin) => {
+ self.confirm_builtin_call(call_expr, callee_ty, arg_exprs, expected);
}
- }
- // Hack: we know that there are traits implementing Fn for &F
- // where F:Fn and so forth. In the particular case of types
- // like `x: &mut FnMut()`, if there is a call `x()`, we would
- // normally translate to `FnMut::call_mut(&mut x, ())`, but
- // that winds up requiring `mut x: &mut FnMut()`. A little
- // over the top. The simplest fix by far is to just ignore
- // this case and deref again, so we wind up with
- // `FnMut::call_mut(&mut *x, ())`.
- ty::TyRef(..) if autoderefs == 0 => {
- return None;
- }
+ Some(CallStep::DeferredClosure(fn_sig)) => {
+ self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, fn_sig);
+ }
- _ => {}
+ Some(CallStep::Overloaded(method_callee)) => {
+ self.confirm_overloaded_call(call_expr, callee_expr,
+ arg_exprs, expected, method_callee);
+ }
+ }
}
- try_overloaded_call_traits(fcx, call_expr, callee_expr, adjusted_ty, autoderefs)
- .map(|method_callee| CallStep::Overloaded(method_callee))
-}
+ fn try_overloaded_call_step(&self,
+ call_expr: &'gcx hir::Expr,
+ callee_expr: &'gcx hir::Expr,
+ adjusted_ty: Ty<'tcx>,
+ autoderefs: usize)
+ -> Option<CallStep<'tcx>>
+ {
+ debug!("try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?}, autoderefs={})",
+ call_expr,
+ adjusted_ty,
+ autoderefs);
+
+ // If the callee is a bare function or a closure, then we're all set.
+ match self.structurally_resolved_type(callee_expr.span, adjusted_ty).sty {
+ ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ self.write_autoderef_adjustment(callee_expr.id, autoderefs);
+ return Some(CallStep::Builtin);
+ }
-fn try_overloaded_call_traits<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &hir::Expr,
- callee_expr: &hir::Expr,
- adjusted_ty: Ty<'tcx>,
- autoderefs: usize)
- -> Option<ty::MethodCallee<'tcx>>
-{
- // Try the options that are least restrictive on the caller first.
- for &(opt_trait_def_id, method_name) in &[
- (fcx.tcx().lang_items.fn_trait(), token::intern("call")),
- (fcx.tcx().lang_items.fn_mut_trait(), token::intern("call_mut")),
- (fcx.tcx().lang_items.fn_once_trait(), token::intern("call_once")),
- ] {
- let trait_def_id = match opt_trait_def_id {
- Some(def_id) => def_id,
- None => continue,
- };
+ ty::TyClosure(def_id, substs) => {
+ assert_eq!(def_id.krate, LOCAL_CRATE);
+
+ // Check whether this is a call to a closure where we
+ // haven't yet decided on whether the closure is fn vs
+ // fnmut vs fnonce. If so, we have to defer further processing.
+ if self.closure_kind(def_id).is_none() {
+ let closure_ty =
+ self.closure_type(def_id, substs);
+ let fn_sig =
+ self.replace_late_bound_regions_with_fresh_var(call_expr.span,
+ infer::FnCall,
+ &closure_ty.sig).0;
+ self.record_deferred_call_resolution(def_id, Box::new(CallResolution {
+ call_expr: call_expr,
+ callee_expr: callee_expr,
+ adjusted_ty: adjusted_ty,
+ autoderefs: autoderefs,
+ fn_sig: fn_sig.clone(),
+ closure_def_id: def_id
+ }));
+ return Some(CallStep::DeferredClosure(fn_sig));
+ }
+ }
- match method::lookup_in_trait_adjusted(fcx,
- call_expr.span,
- Some(&callee_expr),
- method_name,
- trait_def_id,
- autoderefs,
- false,
- adjusted_ty,
- None) {
- None => continue,
- Some(method_callee) => {
- return Some(method_callee);
+ // Hack: we know that there are traits implementing Fn for &F
+ // where F:Fn and so forth. In the particular case of types
+ // like `x: &mut FnMut()`, if there is a call `x()`, we would
+ // normally translate to `FnMut::call_mut(&mut x, ())`, but
+ // that winds up requiring `mut x: &mut FnMut()`. A little
+ // over the top. The simplest fix by far is to just ignore
+ // this case and deref again, so we wind up with
+ // `FnMut::call_mut(&mut *x, ())`.
+ ty::TyRef(..) if autoderefs == 0 => {
+ return None;
}
+
+ _ => {}
}
- }
- None
-}
+ self.try_overloaded_call_traits(call_expr, callee_expr, adjusted_ty, autoderefs)
+ .map(|method_callee| CallStep::Overloaded(method_callee))
+ }
-fn confirm_builtin_call<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- call_expr: &hir::Expr,
- callee_ty: Ty<'tcx>,
- arg_exprs: &'tcx [P<hir::Expr>],
- expected: Expectation<'tcx>)
-{
- let error_fn_sig;
-
- let fn_sig = match callee_ty.sty {
- ty::TyFnDef(_, _, &ty::BareFnTy {ref sig, ..}) |
- ty::TyFnPtr(&ty::BareFnTy {ref sig, ..}) => {
- sig
+ fn try_overloaded_call_traits(&self,
+ call_expr: &hir::Expr,
+ callee_expr: &hir::Expr,
+ adjusted_ty: Ty<'tcx>,
+ autoderefs: usize)
+ -> Option<ty::MethodCallee<'tcx>>
+ {
+ // Try the options that are least restrictive on the caller first.
+ for &(opt_trait_def_id, method_name) in &[
+ (self.tcx.lang_items.fn_trait(), token::intern("call")),
+ (self.tcx.lang_items.fn_mut_trait(), token::intern("call_mut")),
+ (self.tcx.lang_items.fn_once_trait(), token::intern("call_once")),
+ ] {
+ let trait_def_id = match opt_trait_def_id {
+ Some(def_id) => def_id,
+ None => continue,
+ };
+
+ match self.lookup_method_in_trait_adjusted(call_expr.span,
+ Some(&callee_expr),
+ method_name,
+ trait_def_id,
+ autoderefs,
+ false,
+ adjusted_ty,
+ None) {
+ None => continue,
+ Some(method_callee) => {
+ return Some(method_callee);
+ }
+ }
}
- _ => {
- let mut err = fcx.type_error_struct(call_expr.span, |actual| {
- format!("expected function, found `{}`", actual)
- }, callee_ty, None);
-
- if let hir::ExprCall(ref expr, _) = call_expr.node {
- let tcx = fcx.tcx();
- if let Some(pr) = tcx.def_map.borrow().get(&expr.id) {
- if pr.depth == 0 && pr.base_def != Def::Err {
- if let Some(span) = tcx.map.span_if_local(pr.def_id()) {
- err.span_note(span, "defined here");
+
+ None
+ }
+
+ fn confirm_builtin_call(&self,
+ call_expr: &hir::Expr,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'gcx [P<hir::Expr>],
+ expected: Expectation<'tcx>)
+ {
+ let error_fn_sig;
+
+ let fn_sig = match callee_ty.sty {
+ ty::TyFnDef(_, _, &ty::BareFnTy {ref sig, ..}) |
+ ty::TyFnPtr(&ty::BareFnTy {ref sig, ..}) => {
+ sig
+ }
+ _ => {
+ let mut err = self.type_error_struct(call_expr.span, |actual| {
+ format!("expected function, found `{}`", actual)
+ }, callee_ty, None);
+
+ if let hir::ExprCall(ref expr, _) = call_expr.node {
+ let tcx = self.tcx;
+ if let Some(pr) = tcx.def_map.borrow().get(&expr.id) {
+ if pr.depth == 0 && pr.base_def != Def::Err {
+ if let Some(span) = tcx.map.span_if_local(pr.def_id()) {
+ err.span_note(span, "defined here");
+ }
}
}
}
- }
- err.emit();
+ err.emit();
- // This is the "default" function signature, used in case of error.
- // In that case, we check each argument against "error" in order to
- // set up all the node type bindings.
- error_fn_sig = ty::Binder(ty::FnSig {
- inputs: err_args(fcx.tcx(), arg_exprs.len()),
- output: ty::FnConverging(fcx.tcx().types.err),
- variadic: false
- });
+ // This is the "default" function signature, used in case of error.
+ // In that case, we check each argument against "error" in order to
+ // set up all the node type bindings.
+ error_fn_sig = ty::Binder(ty::FnSig {
+ inputs: self.err_args(arg_exprs.len()),
+ output: ty::FnConverging(self.tcx.types.err),
+ variadic: false
+ });
- &error_fn_sig
- }
- };
-
- // Replace any late-bound regions that appear in the function
- // signature with region variables. We also have to
- // renormalize the associated types at this point, since they
- // previously appeared within a `Binder<>` and hence would not
- // have been normalized before.
- let fn_sig =
- fcx.infcx().replace_late_bound_regions_with_fresh_var(call_expr.span,
- infer::FnCall,
- fn_sig).0;
- let fn_sig =
- fcx.normalize_associated_types_in(call_expr.span, &fn_sig);
-
- // Call the generic checker.
- let expected_arg_tys = expected_types_for_fn_args(fcx,
- call_expr.span,
- expected,
- fn_sig.output,
- &fn_sig.inputs);
- check_argument_types(fcx,
- call_expr.span,
- &fn_sig.inputs,
- &expected_arg_tys[..],
- arg_exprs,
- fn_sig.variadic,
- TupleArgumentsFlag::DontTupleArguments);
-
- write_call(fcx, call_expr, fn_sig.output);
-}
+ &error_fn_sig
+ }
+ };
-fn confirm_deferred_closure_call<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- call_expr: &hir::Expr,
- arg_exprs: &'tcx [P<hir::Expr>],
- expected: Expectation<'tcx>,
- fn_sig: ty::FnSig<'tcx>)
-{
- // `fn_sig` is the *signature* of the cosure being called. We
- // don't know the full details yet (`Fn` vs `FnMut` etc), but we
- // do know the types expected for each argument and the return
- // type.
-
- let expected_arg_tys =
- expected_types_for_fn_args(fcx,
- call_expr.span,
- expected,
- fn_sig.output.clone(),
- &fn_sig.inputs);
-
- check_argument_types(fcx,
- call_expr.span,
- &fn_sig.inputs,
- &expected_arg_tys,
- arg_exprs,
- fn_sig.variadic,
- TupleArgumentsFlag::TupleArguments);
-
- write_call(fcx, call_expr, fn_sig.output);
-}
+ // Replace any late-bound regions that appear in the function
+ // signature with region variables. We also have to
+ // renormalize the associated types at this point, since they
+ // previously appeared within a `Binder<>` and hence would not
+ // have been normalized before.
+ let fn_sig =
+ self.replace_late_bound_regions_with_fresh_var(call_expr.span,
+ infer::FnCall,
+ fn_sig).0;
+ let fn_sig =
+ self.normalize_associated_types_in(call_expr.span, &fn_sig);
+
+ // Call the generic checker.
+ let expected_arg_tys = self.expected_types_for_fn_args(call_expr.span,
+ expected,
+ fn_sig.output,
+ &fn_sig.inputs);
+ self.check_argument_types(call_expr.span,
+ &fn_sig.inputs,
+ &expected_arg_tys[..],
+ arg_exprs,
+ fn_sig.variadic,
+ TupleArgumentsFlag::DontTupleArguments);
+
+ self.write_call(call_expr, fn_sig.output);
+ }
-fn confirm_overloaded_call<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &hir::Expr,
- callee_expr: &'tcx hir::Expr,
- arg_exprs: &'tcx [P<hir::Expr>],
- expected: Expectation<'tcx>,
- method_callee: ty::MethodCallee<'tcx>)
-{
- let output_type =
- check_method_argument_types(fcx,
- call_expr.span,
- method_callee.ty,
- callee_expr,
- arg_exprs,
- TupleArgumentsFlag::TupleArguments,
- expected);
- write_call(fcx, call_expr, output_type);
-
- write_overloaded_call_method_map(fcx, call_expr, method_callee);
-}
+ fn confirm_deferred_closure_call(&self,
+ call_expr: &hir::Expr,
+ arg_exprs: &'gcx [P<hir::Expr>],
+ expected: Expectation<'tcx>,
+ fn_sig: ty::FnSig<'tcx>)
+ {
+ // `fn_sig` is the *signature* of the closure being called. We
+ // don't know the full details yet (`Fn` vs `FnMut` etc), but we
+ // do know the types expected for each argument and the return
+ // type.
+
+ let expected_arg_tys =
+ self.expected_types_for_fn_args(call_expr.span,
+ expected,
+ fn_sig.output.clone(),
+ &fn_sig.inputs);
+
+ self.check_argument_types(call_expr.span,
+ &fn_sig.inputs,
+ &expected_arg_tys,
+ arg_exprs,
+ fn_sig.variadic,
+ TupleArgumentsFlag::TupleArguments);
+
+ self.write_call(call_expr, fn_sig.output);
+ }
-fn write_overloaded_call_method_map<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &hir::Expr,
- method_callee: ty::MethodCallee<'tcx>) {
- let method_call = ty::MethodCall::expr(call_expr.id);
- fcx.inh.tables.borrow_mut().method_map.insert(method_call, method_callee);
+ fn confirm_overloaded_call(&self,
+ call_expr: &hir::Expr,
+ callee_expr: &'gcx hir::Expr,
+ arg_exprs: &'gcx [P<hir::Expr>],
+ expected: Expectation<'tcx>,
+ method_callee: ty::MethodCallee<'tcx>)
+ {
+ let output_type =
+ self.check_method_argument_types(call_expr.span,
+ method_callee.ty,
+ callee_expr,
+ arg_exprs,
+ TupleArgumentsFlag::TupleArguments,
+ expected);
+ self.write_call(call_expr, output_type);
+
+ self.write_overloaded_call_method_map(call_expr, method_callee);
+ }
+
+ fn write_overloaded_call_method_map(&self,
+ call_expr: &hir::Expr,
+ method_callee: ty::MethodCallee<'tcx>) {
+ let method_call = ty::MethodCall::expr(call_expr.id);
+ self.tables.borrow_mut().method_map.insert(method_call, method_callee);
+ }
}
#[derive(Debug)]
-struct CallResolution<'tcx> {
- call_expr: &'tcx hir::Expr,
- callee_expr: &'tcx hir::Expr,
+struct CallResolution<'gcx: 'tcx, 'tcx> {
+ call_expr: &'gcx hir::Expr,
+ callee_expr: &'gcx hir::Expr,
adjusted_ty: Ty<'tcx>,
autoderefs: usize,
fn_sig: ty::FnSig<'tcx>,
closure_def_id: DefId,
}
-impl<'tcx> DeferredCallResolution<'tcx> for CallResolution<'tcx> {
- fn resolve<'a>(&mut self, fcx: &FnCtxt<'a,'tcx>) {
+impl<'gcx, 'tcx> DeferredCallResolution<'gcx, 'tcx> for CallResolution<'gcx, 'tcx> {
+ fn resolve<'a>(&mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) {
debug!("DeferredCallResolution::resolve() {:?}",
self);
// we should not be invoked until the closure kind has been
// determined by upvar inference
- assert!(fcx.infcx().closure_kind(self.closure_def_id).is_some());
+ assert!(fcx.closure_kind(self.closure_def_id).is_some());
// We may now know enough to figure out fn vs fnmut etc.
- match try_overloaded_call_traits(fcx, self.call_expr, self.callee_expr,
- self.adjusted_ty, self.autoderefs) {
+ match fcx.try_overloaded_call_traits(self.call_expr, self.callee_expr,
+ self.adjusted_ty, self.autoderefs) {
Some(method_callee) => {
// One problem is that when we get here, we are going
// to have a newly instantiated function signature
// can't because of the annoying need for a TypeTrace.
// (This always bites me, should find a way to
// refactor it.)
- let method_sig = fcx.tcx().no_late_bound_regions(method_callee.ty.fn_sig())
- .unwrap();
+ let method_sig = fcx.tcx.no_late_bound_regions(method_callee.ty.fn_sig())
+ .unwrap();
debug!("attempt_resolution: method_callee={:?}",
method_callee);
for (&method_arg_ty, &self_arg_ty) in
method_sig.inputs[1..].iter().zip(&self.fn_sig.inputs)
{
- demand::eqtype(fcx, self.call_expr.span, self_arg_ty, method_arg_ty);
+ fcx.demand_eqtype(self.call_expr.span, self_arg_ty, method_arg_ty);
}
- let nilty = fcx.tcx().mk_nil();
- demand::eqtype(fcx,
- self.call_expr.span,
- method_sig.output.unwrap_or(nilty),
- self.fn_sig.output.unwrap_or(nilty));
+ let nilty = fcx.tcx.mk_nil();
+ fcx.demand_eqtype(self.call_expr.span,
+ method_sig.output.unwrap_or(nilty),
+ self.fn_sig.output.unwrap_or(nilty));
- write_overloaded_call_method_map(fcx, self.call_expr, method_callee);
+ fcx.write_overloaded_call_method_map(self.call_expr, method_callee);
}
None => {
span_bug!(
//! expression, `e as U2` is not necessarily so (in fact it will only be valid if
//! `U1` coerces to `U2`).
-use super::coercion;
-use super::demand;
use super::FnCtxt;
-use super::structurally_resolved_type;
use lint;
use hir::def_id::DefId;
OfParam(&'tcx ty::ParamTy)
}
-/// Returns the kind of unsize information of t, or None
-/// if t is sized or it is unknown.
-fn unsize_kind<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- t: Ty<'tcx>)
- -> Option<UnsizeKind<'tcx>> {
- match t.sty {
- ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length),
- ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal_def_id())),
- ty::TyStruct(def, substs) => {
- // FIXME(arielb1): do some kind of normalization
- match def.struct_variant().fields.last() {
- None => None,
- Some(f) => unsize_kind(fcx, f.ty(fcx.tcx(), substs))
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ /// Returns the kind of unsize information of t, or None
+ /// if t is sized or it is unknown.
+ fn unsize_kind(&self, t: Ty<'tcx>) -> Option<UnsizeKind<'tcx>> {
+ match t.sty {
+ ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length),
+ ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal_def_id())),
+ ty::TyStruct(def, substs) => {
+ // FIXME(arielb1): do some kind of normalization
+ match def.struct_variant().fields.last() {
+ None => None,
+ Some(f) => self.unsize_kind(f.ty(self.tcx, substs))
+ }
}
+ // We should really try to normalize here.
+ ty::TyProjection(ref pi) => Some(UnsizeKind::OfProjection(pi)),
+ ty::TyParam(ref p) => Some(UnsizeKind::OfParam(p)),
+ _ => None
}
- // We should really try to normalize here.
- ty::TyProjection(ref pi) => Some(UnsizeKind::OfProjection(pi)),
- ty::TyParam(ref p) => Some(UnsizeKind::OfParam(p)),
- _ => None
}
}
NonScalar,
}
-impl<'tcx> CastCheck<'tcx> {
- pub fn new<'a>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expr_ty: Ty<'tcx>,
- cast_ty: Ty<'tcx>,
- cast_span: Span,
- span: Span)
- -> Result<CastCheck<'tcx>, ErrorReported> {
+impl<'a, 'gcx, 'tcx> CastCheck<'tcx> {
+ pub fn new(fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ expr: &'tcx hir::Expr,
+ expr_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ cast_span: Span,
+ span: Span)
+ -> Result<CastCheck<'tcx>, ErrorReported> {
let check = CastCheck {
expr: expr,
expr_ty: expr_ty,
}
}
- fn report_cast_error<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- e: CastError) {
+ fn report_cast_error(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, e: CastError) {
match e {
CastError::NeedViaPtr |
CastError::NeedViaThinPtr |
fcx.type_error_struct(self.span, |actual| {
format!("casting `{}` as `{}` is invalid",
actual,
- fcx.infcx().ty_to_string(self.cast_ty))
+ fcx.ty_to_string(self.cast_ty))
}, self.expr_ty, None)
.help(&format!("cast through {} first", match e {
CastError::NeedViaPtr => "a raw pointer",
.emit();
}
CastError::CastToBool => {
- struct_span_err!(fcx.tcx().sess, self.span, E0054, "cannot cast as `bool`")
+ struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`")
.help("compare with zero instead")
.emit();
}
fcx.type_error_message(self.span, |actual| {
format!("non-scalar cast: `{}` as `{}`",
actual,
- fcx.infcx().ty_to_string(self.cast_ty))
+ fcx.ty_to_string(self.cast_ty))
}, self.expr_ty, None);
}
CastError::IllegalCast => {
fcx.type_error_message(self.span, |actual| {
format!("casting `{}` as `{}` is invalid",
actual,
- fcx.infcx().ty_to_string(self.cast_ty))
+ fcx.ty_to_string(self.cast_ty))
}, self.expr_ty, None);
}
CastError::SizedUnsizedCast => {
fcx.type_error_message(self.span, |actual| {
format!("cannot cast thin pointer `{}` to fat pointer `{}`",
actual,
- fcx.infcx().ty_to_string(self.cast_ty))
+ fcx.ty_to_string(self.cast_ty))
}, self.expr_ty, None)
}
CastError::DifferingKinds => {
fcx.type_error_struct(self.span, |actual| {
format!("casting `{}` as `{}` is invalid",
actual,
- fcx.infcx().ty_to_string(self.cast_ty))
+ fcx.ty_to_string(self.cast_ty))
}, self.expr_ty, None)
.note("vtable kinds may not match")
.emit();
}
}
- fn report_cast_to_unsized_type<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>) {
+ fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) {
if
self.cast_ty.references_error() ||
self.expr_ty.references_error()
return;
}
- let tstr = fcx.infcx().ty_to_string(self.cast_ty);
+ let tstr = fcx.ty_to_string(self.cast_ty);
let mut err = fcx.type_error_struct(self.span, |actual| {
format!("cast to unsized type: `{}` as `{}`", actual, tstr)
}, self.expr_ty, None);
hir::MutImmutable => ""
};
if self.cast_ty.is_trait() {
- match fcx.tcx().sess.codemap().span_to_snippet(self.cast_span) {
+ match fcx.tcx.sess.codemap().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(self.cast_span,
"try casting to a reference instead:",
}
}
ty::TyBox(..) => {
- match fcx.tcx().sess.codemap().span_to_snippet(self.cast_span) {
+ match fcx.tcx.sess.codemap().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(self.cast_span,
"try casting to a `Box` instead:",
err.emit();
}
- fn trivial_cast_lint<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) {
let t_cast = self.cast_ty;
let t_expr = self.expr_ty;
if t_cast.is_numeric() && t_expr.is_numeric() {
- fcx.tcx().sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS,
- self.expr.id,
- self.span,
- format!("trivial numeric cast: `{}` as `{}`. Cast can be \
- replaced by coercion, this might require type \
- ascription or a temporary variable",
- fcx.infcx().ty_to_string(t_expr),
- fcx.infcx().ty_to_string(t_cast)));
+ fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS,
+ self.expr.id,
+ self.span,
+ format!("trivial numeric cast: `{}` as `{}`. Cast can be \
+ replaced by coercion, this might require type \
+ ascription or a temporary variable",
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)));
} else {
- fcx.tcx().sess.add_lint(lint::builtin::TRIVIAL_CASTS,
- self.expr.id,
- self.span,
- format!("trivial cast: `{}` as `{}`. Cast can be \
- replaced by coercion, this might require type \
- ascription or a temporary variable",
- fcx.infcx().ty_to_string(t_expr),
- fcx.infcx().ty_to_string(t_cast)));
+ fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_CASTS,
+ self.expr.id,
+ self.span,
+ format!("trivial cast: `{}` as `{}`. Cast can be \
+ replaced by coercion, this might require type \
+ ascription or a temporary variable",
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)));
}
}
- pub fn check<'a>(mut self, fcx: &FnCtxt<'a, 'tcx>) {
- self.expr_ty = structurally_resolved_type(fcx, self.span, self.expr_ty);
- self.cast_ty = structurally_resolved_type(fcx, self.span, self.cast_ty);
+ pub fn check(mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) {
+ self.expr_ty = fcx.structurally_resolved_type(self.span, self.expr_ty);
+ self.cast_ty = fcx.structurally_resolved_type(self.span, self.cast_ty);
debug!("check_cast({}, {:?} as {:?})", self.expr.id, self.expr_ty,
self.cast_ty);
} else if self.try_coercion_cast(fcx) {
self.trivial_cast_lint(fcx);
debug!(" -> CoercionCast");
- fcx.tcx().cast_kinds.borrow_mut().insert(self.expr.id,
- CastKind::CoercionCast);
+ fcx.tcx.cast_kinds.borrow_mut().insert(self.expr.id,
+ CastKind::CoercionCast);
} else { match self.do_check(fcx) {
Ok(k) => {
debug!(" -> {:?}", k);
- fcx.tcx().cast_kinds.borrow_mut().insert(self.expr.id, k);
+ fcx.tcx.cast_kinds.borrow_mut().insert(self.expr.id, k);
}
Err(e) => self.report_cast_error(fcx, e)
};}
/// Check a cast, and report an error if one exists. In some cases, this
/// can return Ok and create type errors in the fcx rather than returning
/// directly. coercion-cast is handled in check instead of here.
- fn do_check<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<CastKind, CastError> {
+ fn do_check(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Result<CastKind, CastError> {
use rustc::ty::cast::IntTy::*;
use rustc::ty::cast::CastTy::*;
(None, Some(t_cast)) => {
if let ty::TyFnDef(_, _, f) = self.expr_ty.sty {
// Attempt a coercion to a fn pointer type.
- let res = coercion::try(fcx, self.expr,
- fcx.tcx().mk_ty(ty::TyFnPtr(f)));
+ let res = fcx.try_coerce(self.expr, fcx.tcx.mk_fn_ptr(f));
if !res.is_ok() {
return Err(CastError::NonScalar);
}
}
}
- fn check_ptr_ptr_cast<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::TypeAndMut<'tcx>,
- m_cast: &'tcx ty::TypeAndMut<'tcx>)
- -> Result<CastKind, CastError>
+ fn check_ptr_ptr_cast(&self,
+ fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ m_expr: &'tcx ty::TypeAndMut<'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
+ -> Result<CastKind, CastError>
{
debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}",
m_expr, m_cast);
}
// vtable kinds must match
- match (unsize_kind(fcx, m_cast.ty), unsize_kind(fcx, m_expr.ty)) {
+ match (fcx.unsize_kind(m_cast.ty), fcx.unsize_kind(m_expr.ty)) {
(Some(a), Some(b)) if a == b => Ok(CastKind::PtrPtrCast),
_ => Err(CastError::DifferingKinds)
}
}
- fn check_fptr_ptr_cast<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- m_cast: &'tcx ty::TypeAndMut<'tcx>)
- -> Result<CastKind, CastError>
+ fn check_fptr_ptr_cast(&self,
+ fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
+ -> Result<CastKind, CastError>
{
// fptr-ptr cast. must be to sized ptr
}
}
- fn check_ptr_addr_cast<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::TypeAndMut<'tcx>)
- -> Result<CastKind, CastError>
+ fn check_ptr_addr_cast(&self,
+ fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ m_expr: &'tcx ty::TypeAndMut<'tcx>)
+ -> Result<CastKind, CastError>
{
// ptr-addr cast. must be from sized ptr
}
}
- fn check_ref_cast<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- m_expr: &'tcx ty::TypeAndMut<'tcx>,
- m_cast: &'tcx ty::TypeAndMut<'tcx>)
- -> Result<CastKind, CastError>
+ fn check_ref_cast(&self,
+ fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ m_expr: &'tcx ty::TypeAndMut<'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
+ -> Result<CastKind, CastError>
{
// array-ptr-cast.
// from a region pointer to a vector.
// this will report a type mismatch if needed
- demand::eqtype(fcx, self.span, ety, m_cast.ty);
+ fcx.demand_eqtype(self.span, ety, m_cast.ty);
return Ok(CastKind::ArrayPtrCast);
}
}
Err(CastError::IllegalCast)
}
- fn check_addr_ptr_cast<'a>(&self,
- fcx: &FnCtxt<'a, 'tcx>,
- m_cast: &'tcx ty::TypeAndMut<'tcx>)
- -> Result<CastKind, CastError>
+ fn check_addr_ptr_cast(&self,
+ fcx: &FnCtxt<'a, 'gcx, 'tcx>,
+ m_cast: &'tcx ty::TypeAndMut<'tcx>)
+ -> Result<CastKind, CastError>
{
// ptr-addr cast. pointer must be thin.
if fcx.type_is_known_to_be_sized(m_cast.ty, self.span) {
}
}
- fn try_coercion_cast<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> bool {
- coercion::try(fcx, self.expr, self.cast_ty).is_ok()
+ fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> bool {
+ fcx.try_coerce(self.expr, self.cast_ty).is_ok()
}
}
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
fn type_is_known_to_be_sized(&self,
ty: Ty<'tcx>,
span: Span)
-> bool
{
- traits::type_known_to_meet_builtin_bound(self.infcx(),
- ty,
- ty::BoundSized,
- span)
+ traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundSized, span)
}
}
use super::{check_fn, Expectation, FnCtxt};
-use astconv;
+use astconv::AstConv;
use rustc::ty::subst;
use rustc::ty::{self, ToPolyTraitRef, Ty};
use std::cmp;
use syntax::abi::Abi;
use rustc::hir;
-pub fn check_expr_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &hir::Expr,
- _capture: hir::CaptureClause,
- decl: &'tcx hir::FnDecl,
- body: &'tcx hir::Block,
- expected: Expectation<'tcx>) {
- debug!("check_expr_closure(expr={:?},expected={:?})",
- expr,
- expected);
-
- // It's always helpful for inference if we know the kind of
- // closure sooner rather than later, so first examine the expected
- // type, and see if can glean a closure kind from there.
- let (expected_sig,expected_kind) = match expected.to_option(fcx) {
- Some(ty) => deduce_expectations_from_expected_type(fcx, ty),
- None => (None, None)
- };
- check_closure(fcx, expr, expected_kind, decl, body, expected_sig)
-}
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn check_expr_closure(&self,
+ expr: &hir::Expr,
+ _capture: hir::CaptureClause,
+ decl: &'gcx hir::FnDecl,
+ body: &'gcx hir::Block,
+ expected: Expectation<'tcx>) {
+ debug!("check_expr_closure(expr={:?},expected={:?})",
+ expr,
+ expected);
+
+ // It's always helpful for inference if we know the kind of
+ // closure sooner rather than later, so first examine the expected
+ // type, and see if we can glean a closure kind from there.
+ let (expected_sig,expected_kind) = match expected.to_option(self) {
+ Some(ty) => self.deduce_expectations_from_expected_type(ty),
+ None => (None, None)
+ };
+ self.check_closure(expr, expected_kind, decl, body, expected_sig)
+ }
-fn check_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &hir::Expr,
- opt_kind: Option<ty::ClosureKind>,
- decl: &'tcx hir::FnDecl,
- body: &'tcx hir::Block,
- expected_sig: Option<ty::FnSig<'tcx>>) {
- let expr_def_id = fcx.tcx().map.local_def_id(expr.id);
-
- debug!("check_closure opt_kind={:?} expected_sig={:?}",
- opt_kind,
- expected_sig);
-
- let mut fn_ty = astconv::ty_of_closure(fcx,
- hir::Unsafety::Normal,
- decl,
- Abi::RustCall,
- expected_sig);
-
- // Create type variables (for now) to represent the transformed
- // types of upvars. These will be unified during the upvar
- // inference phase (`upvar.rs`).
- let num_upvars = fcx.tcx().with_freevars(expr.id, |fv| fv.len());
- let upvar_tys = fcx.infcx().next_ty_vars(num_upvars);
-
- debug!("check_closure: expr.id={:?} upvar_tys={:?}",
- expr.id, upvar_tys);
-
- let closure_type =
- fcx.ccx.tcx.mk_closure(
- expr_def_id,
- fcx.ccx.tcx.mk_substs(fcx.inh.infcx.parameter_environment.free_substs.clone()),
+ fn check_closure(&self,
+ expr: &hir::Expr,
+ opt_kind: Option<ty::ClosureKind>,
+ decl: &'gcx hir::FnDecl,
+ body: &'gcx hir::Block,
+ expected_sig: Option<ty::FnSig<'tcx>>) {
+ let expr_def_id = self.tcx.map.local_def_id(expr.id);
+
+ debug!("check_closure opt_kind={:?} expected_sig={:?}",
+ opt_kind,
+ expected_sig);
+
+ let mut fn_ty = AstConv::ty_of_closure(self,
+ hir::Unsafety::Normal,
+ decl,
+ Abi::RustCall,
+ expected_sig);
+
+ // Create type variables (for now) to represent the transformed
+ // types of upvars. These will be unified during the upvar
+ // inference phase (`upvar.rs`).
+ let num_upvars = self.tcx.with_freevars(expr.id, |fv| fv.len());
+ let upvar_tys = self.next_ty_vars(num_upvars);
+
+ debug!("check_closure: expr.id={:?} upvar_tys={:?}",
+ expr.id, upvar_tys);
+
+ let closure_type = self.tcx.mk_closure(expr_def_id,
+ self.parameter_environment.free_substs,
upvar_tys);
- fcx.write_ty(expr.id, closure_type);
-
- let fn_sig = fcx.tcx().liberate_late_bound_regions(
- fcx.tcx().region_maps.call_site_extent(expr.id, body.id), &fn_ty.sig);
-
- check_fn(fcx.ccx,
- hir::Unsafety::Normal,
- expr.id,
- &fn_sig,
- decl,
- expr.id,
- &body,
- fcx.inh);
-
- // Tuple up the arguments and insert the resulting function type into
- // the `closures` table.
- fn_ty.sig.0.inputs = vec![fcx.tcx().mk_tup(fn_ty.sig.0.inputs)];
-
- debug!("closure for {:?} --> sig={:?} opt_kind={:?}",
- expr_def_id,
- fn_ty.sig,
- opt_kind);
-
- fcx.inh.tables.borrow_mut().closure_tys.insert(expr_def_id, fn_ty);
- match opt_kind {
- Some(kind) => { fcx.inh.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); }
- None => { }
- }
-}
+ self.write_ty(expr.id, closure_type);
-fn deduce_expectations_from_expected_type<'a,'tcx>(
- fcx: &FnCtxt<'a,'tcx>,
- expected_ty: Ty<'tcx>)
- -> (Option<ty::FnSig<'tcx>>,Option<ty::ClosureKind>)
-{
- debug!("deduce_expectations_from_expected_type(expected_ty={:?})",
- expected_ty);
-
- match expected_ty.sty {
- ty::TyTrait(ref object_type) => {
- let proj_bounds = object_type.projection_bounds_with_self_ty(fcx.tcx(),
- fcx.tcx().types.err);
- let sig = proj_bounds.iter()
- .filter_map(|pb| deduce_sig_from_projection(fcx, pb))
- .next();
- let kind = fcx.tcx().lang_items.fn_trait_kind(object_type.principal_def_id());
- (sig, kind)
- }
- ty::TyInfer(ty::TyVar(vid)) => {
- deduce_expectations_from_obligations(fcx, vid)
- }
- _ => {
- (None, None)
+ let fn_sig = self.tcx.liberate_late_bound_regions(
+ self.tcx.region_maps.call_site_extent(expr.id, body.id), &fn_ty.sig);
+
+ check_fn(self, hir::Unsafety::Normal, expr.id, &fn_sig, decl, expr.id, &body);
+
+ // Tuple up the arguments and insert the resulting function type into
+ // the `closures` table.
+ fn_ty.sig.0.inputs = vec![self.tcx.mk_tup(fn_ty.sig.0.inputs)];
+
+ debug!("closure for {:?} --> sig={:?} opt_kind={:?}",
+ expr_def_id,
+ fn_ty.sig,
+ opt_kind);
+
+ self.tables.borrow_mut().closure_tys.insert(expr_def_id, fn_ty);
+ match opt_kind {
+ Some(kind) => { self.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); }
+ None => { }
}
}
-}
-fn deduce_expectations_from_obligations<'a,'tcx>(
- fcx: &FnCtxt<'a,'tcx>,
- expected_vid: ty::TyVid)
- -> (Option<ty::FnSig<'tcx>>, Option<ty::ClosureKind>)
-{
- let fulfillment_cx = fcx.inh.fulfillment_cx.borrow();
- // Here `expected_ty` is known to be a type inference variable.
-
- let expected_sig =
- fulfillment_cx
- .pending_obligations()
- .iter()
- .map(|obligation| &obligation.obligation)
- .filter_map(|obligation| {
- debug!("deduce_expectations_from_obligations: obligation.predicate={:?}",
- obligation.predicate);
-
- match obligation.predicate {
- // Given a Projection predicate, we can potentially infer
- // the complete signature.
- ty::Predicate::Projection(ref proj_predicate) => {
- let trait_ref = proj_predicate.to_poly_trait_ref();
- self_type_matches_expected_vid(fcx, trait_ref, expected_vid)
- .and_then(|_| deduce_sig_from_projection(fcx, proj_predicate))
- }
- _ => {
- None
- }
+ fn deduce_expectations_from_expected_type(&self, expected_ty: Ty<'tcx>)
+ -> (Option<ty::FnSig<'tcx>>,Option<ty::ClosureKind>)
+ {
+ debug!("deduce_expectations_from_expected_type(expected_ty={:?})",
+ expected_ty);
+
+ match expected_ty.sty {
+ ty::TyTrait(ref object_type) => {
+ let proj_bounds = object_type.projection_bounds_with_self_ty(self.tcx,
+ self.tcx.types.err);
+ let sig = proj_bounds.iter()
+ .filter_map(|pb| self.deduce_sig_from_projection(pb))
+ .next();
+ let kind = self.tcx.lang_items.fn_trait_kind(object_type.principal_def_id());
+ (sig, kind)
}
- })
- .next();
-
- // Even if we can't infer the full signature, we may be able to
- // infer the kind. This can occur if there is a trait-reference
- // like `F : Fn<A>`. Note that due to subtyping we could encounter
- // many viable options, so pick the most restrictive.
- let expected_kind =
- fulfillment_cx
- .pending_obligations()
- .iter()
- .map(|obligation| &obligation.obligation)
- .filter_map(|obligation| {
- let opt_trait_ref = match obligation.predicate {
- ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref()),
- ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()),
- ty::Predicate::Equate(..) => None,
- ty::Predicate::RegionOutlives(..) => None,
- ty::Predicate::TypeOutlives(..) => None,
- ty::Predicate::WellFormed(..) => None,
- ty::Predicate::ObjectSafe(..) => None,
- ty::Predicate::Rfc1592(..) => None,
-
- // NB: This predicate is created by breaking down a
- // `ClosureType: FnFoo()` predicate, where
- // `ClosureType` represents some `TyClosure`. It can't
- // possibly be referring to the current closure,
- // because we haven't produced the `TyClosure` for
- // this closure yet; this is exactly why the other
- // code is looking for a self type of a unresolved
- // inference variable.
- ty::Predicate::ClosureKind(..) => None,
- };
- opt_trait_ref
- .and_then(|trait_ref| self_type_matches_expected_vid(fcx, trait_ref, expected_vid))
- .and_then(|trait_ref| fcx.tcx().lang_items.fn_trait_kind(trait_ref.def_id()))
- })
- .fold(None, pick_most_restrictive_closure_kind);
-
- (expected_sig, expected_kind)
-}
+ ty::TyInfer(ty::TyVar(vid)) => {
+ self.deduce_expectations_from_obligations(vid)
+ }
+ _ => {
+ (None, None)
+ }
+ }
+ }
-fn pick_most_restrictive_closure_kind(best: Option<ty::ClosureKind>,
- cur: ty::ClosureKind)
- -> Option<ty::ClosureKind>
-{
- match best {
- None => Some(cur),
- Some(best) => Some(cmp::min(best, cur))
+ fn deduce_expectations_from_obligations(&self, expected_vid: ty::TyVid)
+ -> (Option<ty::FnSig<'tcx>>, Option<ty::ClosureKind>)
+ {
+ let fulfillment_cx = self.fulfillment_cx.borrow();
+ // Here `expected_ty` is known to be a type inference variable.
+
+ let expected_sig =
+ fulfillment_cx
+ .pending_obligations()
+ .iter()
+ .map(|obligation| &obligation.obligation)
+ .filter_map(|obligation| {
+ debug!("deduce_expectations_from_obligations: obligation.predicate={:?}",
+ obligation.predicate);
+
+ match obligation.predicate {
+ // Given a Projection predicate, we can potentially infer
+ // the complete signature.
+ ty::Predicate::Projection(ref proj_predicate) => {
+ let trait_ref = proj_predicate.to_poly_trait_ref();
+ self.self_type_matches_expected_vid(trait_ref, expected_vid)
+ .and_then(|_| self.deduce_sig_from_projection(proj_predicate))
+ }
+ _ => {
+ None
+ }
+ }
+ })
+ .next();
+
+ // Even if we can't infer the full signature, we may be able to
+ // infer the kind. This can occur if there is a trait-reference
+ // like `F : Fn<A>`. Note that due to subtyping we could encounter
+ // many viable options, so pick the most restrictive.
+ let expected_kind =
+ fulfillment_cx
+ .pending_obligations()
+ .iter()
+ .map(|obligation| &obligation.obligation)
+ .filter_map(|obligation| {
+ let opt_trait_ref = match obligation.predicate {
+ ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref()),
+ ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()),
+ ty::Predicate::Equate(..) => None,
+ ty::Predicate::RegionOutlives(..) => None,
+ ty::Predicate::TypeOutlives(..) => None,
+ ty::Predicate::WellFormed(..) => None,
+ ty::Predicate::ObjectSafe(..) => None,
+ ty::Predicate::Rfc1592(..) => None,
+
+ // NB: This predicate is created by breaking down a
+ // `ClosureType: FnFoo()` predicate, where
+ // `ClosureType` represents some `TyClosure`. It can't
+ // possibly be referring to the current closure,
+ // because we haven't produced the `TyClosure` for
+ // this closure yet; this is exactly why the other
+ // code is looking for a self type of an unresolved
+ // inference variable.
+ ty::Predicate::ClosureKind(..) => None,
+ };
+ opt_trait_ref
+ .and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid))
+ .and_then(|tr| self.tcx.lang_items.fn_trait_kind(tr.def_id()))
+ })
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+
+ (expected_sig, expected_kind)
}
-}
-/// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce
-/// everything we need to know about a closure.
-fn deduce_sig_from_projection<'a,'tcx>(
- fcx: &FnCtxt<'a,'tcx>,
- projection: &ty::PolyProjectionPredicate<'tcx>)
- -> Option<ty::FnSig<'tcx>>
-{
- let tcx = fcx.tcx();
+ /// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce
+ /// everything we need to know about a closure.
+ fn deduce_sig_from_projection(&self,
+ projection: &ty::PolyProjectionPredicate<'tcx>)
+ -> Option<ty::FnSig<'tcx>>
+ {
+ let tcx = self.tcx;
- debug!("deduce_sig_from_projection({:?})",
- projection);
+ debug!("deduce_sig_from_projection({:?})",
+ projection);
- let trait_ref = projection.to_poly_trait_ref();
+ let trait_ref = projection.to_poly_trait_ref();
- if tcx.lang_items.fn_trait_kind(trait_ref.def_id()).is_none() {
- return None;
- }
+ if tcx.lang_items.fn_trait_kind(trait_ref.def_id()).is_none() {
+ return None;
+ }
- let arg_param_ty = *trait_ref.substs().types.get(subst::TypeSpace, 0);
- let arg_param_ty = fcx.infcx().resolve_type_vars_if_possible(&arg_param_ty);
- debug!("deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty);
+ let arg_param_ty = *trait_ref.substs().types.get(subst::TypeSpace, 0);
+ let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty);
+ debug!("deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty);
- let input_tys = match arg_param_ty.sty {
- ty::TyTuple(ref tys) => { (*tys).clone() }
- _ => { return None; }
- };
- debug!("deduce_sig_from_projection: input_tys {:?}", input_tys);
+ let input_tys = match arg_param_ty.sty {
+ ty::TyTuple(tys) => tys.to_vec(),
+ _ => { return None; }
+ };
+ debug!("deduce_sig_from_projection: input_tys {:?}", input_tys);
- let ret_param_ty = projection.0.ty;
- let ret_param_ty = fcx.infcx().resolve_type_vars_if_possible(&ret_param_ty);
- debug!("deduce_sig_from_projection: ret_param_ty {:?}", ret_param_ty);
+ let ret_param_ty = projection.0.ty;
+ let ret_param_ty = self.resolve_type_vars_if_possible(&ret_param_ty);
+ debug!("deduce_sig_from_projection: ret_param_ty {:?}", ret_param_ty);
- let fn_sig = ty::FnSig {
- inputs: input_tys,
- output: ty::FnConverging(ret_param_ty),
- variadic: false
- };
- debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig);
+ let fn_sig = ty::FnSig {
+ inputs: input_tys,
+ output: ty::FnConverging(ret_param_ty),
+ variadic: false
+ };
+ debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig);
- Some(fn_sig)
-}
+ Some(fn_sig)
+ }
-fn self_type_matches_expected_vid<'a,'tcx>(
- fcx: &FnCtxt<'a,'tcx>,
- trait_ref: ty::PolyTraitRef<'tcx>,
- expected_vid: ty::TyVid)
- -> Option<ty::PolyTraitRef<'tcx>>
-{
- let self_ty = fcx.infcx().shallow_resolve(trait_ref.self_ty());
- debug!("self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})",
- trait_ref,
- self_ty);
- match self_ty.sty {
- ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
- _ => None,
+ fn self_type_matches_expected_vid(&self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ expected_vid: ty::TyVid)
+ -> Option<ty::PolyTraitRef<'tcx>>
+ {
+ let self_ty = self.shallow_resolve(trait_ref.self_ty());
+ debug!("self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})",
+ trait_ref,
+ self_ty);
+ match self_ty.sty {
+ ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
+ _ => None,
+ }
}
}
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
-use check::{autoderef, FnCtxt, UnresolvedTypeAction};
+use check::{FnCtxt, UnresolvedTypeAction};
+use rustc::hir;
use rustc::infer::{Coercion, InferOk, TypeOrigin, TypeTrace};
use rustc::traits::{self, ObligationCause};
-use rustc::traits::{predicate_for_trait_def, report_selection_error};
use rustc::ty::adjustment::{AutoAdjustment, AutoDerefRef, AdjustDerefRef};
use rustc::ty::adjustment::{AutoPtr, AutoUnsafe, AdjustReifyFnPointer};
use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::{self, LvaluePreference, TypeAndMut, Ty, TyCtxt};
+use rustc::ty::{self, LvaluePreference, TypeAndMut, Ty};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::error::TypeError;
use rustc::ty::relate::RelateResult;
use std::cell::RefCell;
use std::collections::VecDeque;
-use rustc::hir;
+use std::ops::Deref;
-struct Coerce<'a, 'tcx: 'a> {
- fcx: &'a FnCtxt<'a, 'tcx>,
+struct Coerce<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
origin: TypeOrigin,
use_lub: bool,
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
+impl<'a, 'gcx, 'tcx> Deref for Coerce<'a, 'gcx, 'tcx> {
+ type Target = FnCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.fcx
+ }
+}
+
type CoerceResult<'tcx> = RelateResult<'tcx, (Ty<'tcx>, AutoAdjustment<'tcx>)>;
fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability,
}
}
-impl<'f, 'tcx> Coerce<'f, 'tcx> {
- fn new(fcx: &'f FnCtxt<'f, 'tcx>, origin: TypeOrigin) -> Self {
+impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> {
+ fn new(fcx: &'f FnCtxt<'f, 'gcx, 'tcx>, origin: TypeOrigin) -> Self {
Coerce {
fcx: fcx,
origin: origin,
}
}
- fn tcx(&self) -> &TyCtxt<'tcx> {
- self.fcx.tcx()
- }
-
fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
- let infcx = self.fcx.infcx();
- infcx.commit_if_ok(|_| {
+ self.commit_if_ok(|_| {
let trace = TypeTrace::types(self.origin, false, a, b);
if self.use_lub {
- infcx.lub(false, trace, &a, &b)
+ self.lub(false, trace, &a, &b)
.map(|InferOk { value, obligations }| {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
value
})
} else {
- infcx.sub(false, trace, &a, &b)
+ self.sub(false, trace, &a, &b)
.map(|InferOk { value, obligations }| {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
where E: Fn() -> I,
I: IntoIterator<Item=&'a hir::Expr> {
- let a = self.fcx.infcx().shallow_resolve(a);
+ let a = self.shallow_resolve(a);
debug!("Coerce.tys({:?} => {:?})", a, b);
// Just ignore error types.
let lvalue_pref = LvaluePreference::from_mutbl(mt_b.mutbl);
let mut first_error = None;
let mut r_borrow_var = None;
- let (_, autoderefs, success) = autoderef(self.fcx, span, a, exprs,
- UnresolvedTypeAction::Ignore,
- lvalue_pref,
- |referent_ty, autoderef|
+ let (_, autoderefs, success) = self.autoderef(span, a, exprs,
+ UnresolvedTypeAction::Ignore,
+ lvalue_pref,
+ |referent_ty, autoderef|
{
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
} else {
if r_borrow_var.is_none() { // create var lazilly, at most once
let coercion = Coercion(span);
- let r = self.fcx.infcx().next_region_var(coercion);
- r_borrow_var = Some(self.tcx().mk_region(r)); // [4] above
+ let r = self.next_region_var(coercion);
+ r_borrow_var = Some(self.tcx.mk_region(r)); // [4] above
}
r_borrow_var.unwrap()
};
- let derefd_ty_a = self.tcx().mk_ref(r, TypeAndMut {
+ let derefd_ty_a = self.tcx.mk_ref(r, TypeAndMut {
ty: referent_ty,
mutbl: mt_b.mutbl // [1] above
});
source,
target);
- let traits = (self.tcx().lang_items.unsize_trait(),
- self.tcx().lang_items.coerce_unsized_trait());
+ let traits = (self.tcx.lang_items.unsize_trait(),
+ self.tcx.lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
let coercion = Coercion(self.origin.span());
- let r_borrow = self.fcx.infcx().next_region_var(coercion);
- let region = self.tcx().mk_region(r_borrow);
+ let r_borrow = self.next_region_var(coercion);
+ let region = self.tcx.mk_region(r_borrow);
(mt_a.ty, Some(AutoPtr(region, mt_b.mutbl)))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
}
_ => (source, None)
};
- let source = source.adjust_for_autoref(self.tcx(), reborrow);
+ let source = source.adjust_for_autoref(self.tcx, reborrow);
- let mut selcx = traits::SelectionContext::new(self.fcx.infcx());
+ let mut selcx = traits::SelectionContext::new(self);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
- let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
- queue.push_back(predicate_for_trait_def(self.tcx(),
- cause,
- coerce_unsized_did,
- 0,
- source,
- vec![target]));
+ let cause = ObligationCause::misc(self.origin.span(), self.body_id);
+ queue.push_back(self.tcx.predicate_for_trait_def(cause,
+ coerce_unsized_did,
+ 0,
+ source,
+ vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// Object safety violations or miscellaneous.
Err(err) => {
- report_selection_error(self.fcx.infcx(), &obligation, &err, None);
+ self.report_selection_error(&obligation, &err, None);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
* into a closure or a `proc`.
*/
- let b = self.fcx.infcx().shallow_resolve(b);
+ let b = self.shallow_resolve(b);
debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
if let ty::TyFnPtr(fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(hir::Unsafety::Normal, hir::Unsafety::Unsafe) => {
- let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
+ let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
return self.unify_and_identity(unsafe_a, b).map(|(ty, _)| {
(ty, AdjustUnsafeFnPointer)
});
* into a closure or a `proc`.
*/
- let b = self.fcx.infcx().shallow_resolve(b);
+ let b = self.shallow_resolve(b);
debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
match b.sty {
ty::TyFnPtr(_) => {
- let a_fn_pointer = self.tcx().mk_ty(ty::TyFnPtr(fn_ty_a));
+ let a_fn_pointer = self.tcx.mk_fn_ptr(fn_ty_a);
self.unify_and_identity(a_fn_pointer, b).map(|(ty, _)| {
(ty, AdjustReifyFnPointer)
})
};
// Check that the types which they point at are compatible.
- let a_unsafe = self.tcx().mk_ptr(ty::TypeAndMut{ mutbl: mutbl_b, ty: mt_a.ty });
+ let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut{ mutbl: mutbl_b, ty: mt_a.ty });
let (ty, noop) = self.unify_and_identity(a_unsafe, b)?;
coerce_mutbls(mt_a.mutbl, mutbl_b)?;
}
}
-fn apply<'a, 'b, 'tcx, E, I>(coerce: &mut Coerce<'a, 'tcx>,
- exprs: &E,
- a: Ty<'tcx>,
- b: Ty<'tcx>)
- -> CoerceResult<'tcx>
+fn apply<'a, 'b, 'gcx, 'tcx, E, I>(coerce: &mut Coerce<'a, 'gcx, 'tcx>,
+ exprs: &E,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>)
+ -> CoerceResult<'tcx>
where E: Fn() -> I,
I: IntoIterator<Item=&'b hir::Expr> {
Ok((ty, adjustment))
}
-/// Attempt to coerce an expression to a type, and return the
-/// adjusted type of the expression, if successful.
-/// Adjustments are only recorded if the coercion succeeded.
-/// The expressions *must not* have any pre-existing adjustments.
-pub fn try<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &hir::Expr,
- target: Ty<'tcx>)
- -> RelateResult<'tcx, Ty<'tcx>> {
- let source = fcx.resolve_type_vars_if_possible(fcx.expr_ty(expr));
- debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
-
- let mut coerce = Coerce::new(fcx, TypeOrigin::ExprAssignable(expr.span));
- fcx.infcx().commit_if_ok(|_| {
- let (ty, adjustment) =
- apply(&mut coerce, &|| Some(expr), source, target)?;
- if !adjustment.is_identity() {
- debug!("Success, coerced with {:?}", adjustment);
- assert!(!fcx.inh.tables.borrow().adjustments.contains_key(&expr.id));
- fcx.write_adjustment(expr.id, adjustment);
- }
- Ok(ty)
- })
-}
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ /// Attempt to coerce an expression to a type, and return the
+ /// adjusted type of the expression, if successful.
+ /// Adjustments are only recorded if the coercion succeeded.
+ /// The expressions *must not* have any pre-existing adjustments.
+ pub fn try_coerce(&self,
+ expr: &hir::Expr,
+ target: Ty<'tcx>)
+ -> RelateResult<'tcx, Ty<'tcx>> {
+ let source = self.resolve_type_vars_with_obligations(self.expr_ty(expr));
+ debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
+
+ let mut coerce = Coerce::new(self, TypeOrigin::ExprAssignable(expr.span));
+ // `commit_if_ok` rolls back any inference side-effects when the
+ // closure returns `Err`, so a failed coercion attempt leaves the
+ // inference context untouched.
+ self.commit_if_ok(|_| {
+ let (ty, adjustment) =
+ apply(&mut coerce, &|| Some(expr), source, target)?;
+ if !adjustment.is_identity() {
+ debug!("Success, coerced with {:?}", adjustment);
+ // Callers must uphold the "no pre-existing adjustments"
+ // contract documented above; assert it before recording.
+ assert!(!self.tables.borrow().adjustments.contains_key(&expr.id));
+ self.write_adjustment(expr.id, adjustment);
+ }
+ Ok(ty)
+ })
+ }
-/// Given some expressions, their known unified type and another expression,
-/// tries to unify the types, potentially inserting coercions on any of the
-/// provided expressions and returns their LUB (aka "common supertype").
-pub fn try_find_lub<'a, 'b, 'tcx, E, I>(fcx: &FnCtxt<'a, 'tcx>,
- origin: TypeOrigin,
- exprs: E,
- prev_ty: Ty<'tcx>,
- new: &'b hir::Expr)
- -> RelateResult<'tcx, Ty<'tcx>>
- // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
- where E: Fn() -> I,
- I: IntoIterator<Item=&'b hir::Expr> {
+ /// Given some expressions, their known unified type and another expression,
+ /// tries to unify the types, potentially inserting coercions on any of the
+ /// provided expressions and returns their LUB (aka "common supertype").
+ pub fn try_find_coercion_lub<'b, E, I>(&self,
+ origin: TypeOrigin,
+ exprs: E,
+ prev_ty: Ty<'tcx>,
+ new: &'b hir::Expr)
+ -> RelateResult<'tcx, Ty<'tcx>>
+ // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+ where E: Fn() -> I,
+ I: IntoIterator<Item=&'b hir::Expr> {
- let prev_ty = fcx.resolve_type_vars_if_possible(prev_ty);
- let new_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(new));
- debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty);
-
- let trace = TypeTrace::types(origin, true, prev_ty, new_ty);
-
- // Special-case that coercion alone cannot handle:
- // Two function item types of differing IDs or Substs.
- match (&prev_ty.sty, &new_ty.sty) {
- (&ty::TyFnDef(a_def_id, a_substs, a_fty),
- &ty::TyFnDef(b_def_id, b_substs, b_fty)) => {
- // The signature must always match.
- let fty = fcx.infcx().lub(true, trace.clone(), a_fty, b_fty)
- .map(|InferOk { value, obligations }| {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- value
- })?;
-
- if a_def_id == b_def_id {
- // Same function, maybe the parameters match.
- let substs = fcx.infcx().commit_if_ok(|_| {
- fcx.infcx().lub(true, trace.clone(), a_substs, b_substs)
- .map(|InferOk { value, obligations }| {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- value
- })
- }).map(|s| fcx.tcx().mk_substs(s));
+ let prev_ty = self.resolve_type_vars_with_obligations(prev_ty);
+ let new_ty = self.resolve_type_vars_with_obligations(self.expr_ty(new));
+ debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty);
- if let Ok(substs) = substs {
- // We have a LUB of prev_ty and new_ty, just return it.
- return Ok(fcx.tcx().mk_fn_def(a_def_id, substs, fty));
- }
- }
+ let trace = TypeTrace::types(origin, true, prev_ty, new_ty);
- // Reify both sides and return the reified fn pointer type.
- for expr in exprs().into_iter().chain(Some(new)) {
- // No adjustments can produce a fn item, so this should never trip.
- assert!(!fcx.inh.tables.borrow().adjustments.contains_key(&expr.id));
- fcx.write_adjustment(expr.id, AdjustReifyFnPointer);
- }
- return Ok(fcx.tcx().mk_fn_ptr(fty));
- }
- _ => {}
- }
+ // Special-case that coercion alone cannot handle:
+ // Two function item types of differing IDs or Substs.
+ match (&prev_ty.sty, &new_ty.sty) {
+ (&ty::TyFnDef(a_def_id, a_substs, a_fty),
+ &ty::TyFnDef(b_def_id, b_substs, b_fty)) => {
+ // The signature must always match.
+ let fty = self.lub(true, trace.clone(), &a_fty, &b_fty)
+ .map(|InferOk { value, obligations }| {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ value
+ })?;
+
+ if a_def_id == b_def_id {
+ // Same function, maybe the parameters match.
+ let substs = self.commit_if_ok(|_| {
+ self.lub(true, trace.clone(), &a_substs, &b_substs)
+ .map(|InferOk { value, obligations }| {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ value
+ })
+ });
- let mut coerce = Coerce::new(fcx, origin);
- coerce.use_lub = true;
+ if let Ok(substs) = substs {
+ // We have a LUB of prev_ty and new_ty, just return it.
+ return Ok(self.tcx.mk_fn_def(a_def_id, substs, fty));
+ }
+ }
- // First try to coerce the new expression to the type of the previous ones,
- // but only if the new expression has no coercion already applied to it.
- let mut first_error = None;
- if !fcx.inh.tables.borrow().adjustments.contains_key(&new.id) {
- let result = fcx.infcx().commit_if_ok(|_| {
- apply(&mut coerce, &|| Some(new), new_ty, prev_ty)
- });
- match result {
- Ok((ty, adjustment)) => {
- if !adjustment.is_identity() {
- fcx.write_adjustment(new.id, adjustment);
+ // Reify both sides and return the reified fn pointer type.
+ for expr in exprs().into_iter().chain(Some(new)) {
+ // No adjustments can produce a fn item, so this should never trip.
+ assert!(!self.tables.borrow().adjustments.contains_key(&expr.id));
+ self.write_adjustment(expr.id, AdjustReifyFnPointer);
}
- return Ok(ty);
+ return Ok(self.tcx.mk_fn_ptr(fty));
}
- Err(e) => first_error = Some(e)
+ _ => {}
}
- }
- // Then try to coerce the previous expressions to the type of the new one.
- // This requires ensuring there are no coercions applied to *any* of the
- // previous expressions, other than noop reborrows (ignoring lifetimes).
- for expr in exprs() {
- let noop = match fcx.inh.tables.borrow().adjustments.get(&expr.id) {
- Some(&AdjustDerefRef(AutoDerefRef {
- autoderefs: 1,
- autoref: Some(AutoPtr(_, mutbl_adj)),
- unsize: None
- })) => match fcx.expr_ty(expr).sty {
- ty::TyRef(_, mt_orig) => {
- // Reborrow that we can safely ignore.
- mutbl_adj == mt_orig.mutbl
- }
- _ => false
- },
- Some(_) => false,
- None => true
- };
+ let mut coerce = Coerce::new(self, origin);
+ coerce.use_lub = true;
- if !noop {
- return fcx.infcx().commit_if_ok(|_| {
- fcx.infcx().lub(true, trace.clone(), &prev_ty, &new_ty)
- .map(|InferOk { value, obligations }| {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- value
- })
+ // First try to coerce the new expression to the type of the previous ones,
+ // but only if the new expression has no coercion already applied to it.
+ let mut first_error = None;
+ if !self.tables.borrow().adjustments.contains_key(&new.id) {
+ let result = self.commit_if_ok(|_| {
+ apply(&mut coerce, &|| Some(new), new_ty, prev_ty)
});
+ match result {
+ Ok((ty, adjustment)) => {
+ if !adjustment.is_identity() {
+ self.write_adjustment(new.id, adjustment);
+ }
+ return Ok(ty);
+ }
+ Err(e) => first_error = Some(e)
+ }
}
- }
- match fcx.infcx().commit_if_ok(|_| apply(&mut coerce, &exprs, prev_ty, new_ty)) {
- Err(_) => {
- // Avoid giving strange errors on failed attempts.
- if let Some(e) = first_error {
- Err(e)
- } else {
- fcx.infcx().commit_if_ok(|_| {
- fcx.infcx().lub(true, trace, &prev_ty, &new_ty)
+ // Then try to coerce the previous expressions to the type of the new one.
+ // This requires ensuring there are no coercions applied to *any* of the
+ // previous expressions, other than noop reborrows (ignoring lifetimes).
+ for expr in exprs() {
+ let noop = match self.tables.borrow().adjustments.get(&expr.id) {
+ Some(&AdjustDerefRef(AutoDerefRef {
+ autoderefs: 1,
+ autoref: Some(AutoPtr(_, mutbl_adj)),
+ unsize: None
+ })) => match self.expr_ty(expr).sty {
+ ty::TyRef(_, mt_orig) => {
+ // Reborrow that we can safely ignore.
+ mutbl_adj == mt_orig.mutbl
+ }
+ _ => false
+ },
+ Some(_) => false,
+ None => true
+ };
+
+ if !noop {
+ return self.commit_if_ok(|_| {
+ self.lub(true, trace.clone(), &prev_ty, &new_ty)
.map(|InferOk { value, obligations }| {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
value
})
- })
+ });
}
}
- Ok((ty, adjustment)) => {
- if !adjustment.is_identity() {
- for expr in exprs() {
- fcx.write_adjustment(expr.id, adjustment);
+
+ match self.commit_if_ok(|_| apply(&mut coerce, &exprs, prev_ty, new_ty)) {
+ Err(_) => {
+ // Avoid giving strange errors on failed attempts.
+ if let Some(e) = first_error {
+ Err(e)
+ } else {
+ self.commit_if_ok(|_| {
+ self.lub(true, trace, &prev_ty, &new_ty)
+ .map(|InferOk { value, obligations }| {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ value
+ })
+ })
}
}
- Ok(ty)
+ Ok((ty, adjustment)) => {
+ if !adjustment.is_identity() {
+ for expr in exprs() {
+ self.write_adjustment(expr.id, adjustment);
+ }
+ }
+ Ok(ty)
+ }
}
}
}
use middle::free_region::FreeRegionMap;
use rustc::infer::{self, InferOk, TypeOrigin};
-use rustc::ty::{self, TyCtxt};
+use rustc::ty;
use rustc::traits::{self, ProjectionMode};
use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace};
use syntax::ast;
use syntax::codemap::Span;
+use CrateCtxt;
use super::assoc;
/// Checks that a method from an impl conforms to the signature of
/// - trait_m: the method in the trait
/// - impl_trait_ref: the TraitRef corresponding to the trait implementation
-pub fn compare_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_m: &ty::Method<'tcx>,
- impl_m_span: Span,
- impl_m_body_id: ast::NodeId,
- trait_m: &ty::Method<'tcx>,
- impl_trait_ref: &ty::TraitRef<'tcx>) {
+pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ impl_m: &ty::Method<'tcx>,
+ impl_m_span: Span,
+ impl_m_body_id: ast::NodeId,
+ trait_m: &ty::Method<'tcx>,
+ impl_trait_ref: &ty::TraitRef<'tcx>) {
debug!("compare_impl_method(impl_trait_ref={:?})",
impl_trait_ref);
debug!("compare_impl_method: impl_trait_ref (liberated) = {:?}",
impl_trait_ref);
- let mut infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
- let mut fulfillment_cx = traits::FulfillmentContext::new();
+ let tcx = ccx.tcx;
let trait_to_impl_substs = &impl_trait_ref.substs;
// Create mapping from trait to skolemized.
let trait_to_skol_substs =
trait_to_impl_substs
- .subst(tcx, impl_to_skol_substs)
+ .subst(tcx, impl_to_skol_substs).clone()
.with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec());
debug!("compare_impl_method: trait_to_skol_substs={:?}",
// Check region bounds. FIXME(@jroesch) refactor this away when removing
// ParamBounds.
- if !check_region_bounds_on_impl_method(tcx,
+ if !check_region_bounds_on_impl_method(ccx,
impl_m_span,
impl_m,
&trait_m.generics,
return;
}
- // Create obligations for each predicate declared by the impl
- // definition in the context of the trait's parameter
- // environment. We can't just use `impl_env.caller_bounds`,
- // however, because we want to replace all late-bound regions with
- // region variables.
- let impl_bounds =
- impl_m.predicates.instantiate(tcx, impl_to_skol_substs);
-
- let (impl_bounds, _) =
- infcx.replace_late_bound_regions_with_fresh_var(
- impl_m_span,
- infer::HigherRankedType,
- &ty::Binder(impl_bounds));
- debug!("compare_impl_method: impl_bounds={:?}",
- impl_bounds);
-
- // Normalize the associated types in the trait_bounds.
- let trait_bounds = trait_m.predicates.instantiate(tcx, &trait_to_skol_substs);
-
- // Obtain the predicate split predicate sets for each.
- let trait_pred = trait_bounds.predicates.split();
- let impl_pred = impl_bounds.predicates.split();
-
- // This is the only tricky bit of the new way we check implementation methods
- // We need to build a set of predicates where only the FnSpace bounds
- // are from the trait and we assume all other bounds from the implementation
- // to be previously satisfied.
- //
- // We then register the obligations from the impl_m and check to see
- // if all constraints hold.
- let hybrid_preds = VecPerParamSpace::new(
- impl_pred.types,
- impl_pred.selfs,
- trait_pred.fns
- );
-
- // Construct trait parameter environment and then shift it into the skolemized viewpoint.
- // The key step here is to update the caller_bounds's predicates to be
- // the new hybrid bounds we computed.
- let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_body_id);
- let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.into_vec());
- let trait_param_env = traits::normalize_param_env_or_error(trait_param_env,
- normalize_cause.clone());
- // FIXME(@jroesch) this seems ugly, but is a temporary change
- infcx.parameter_environment = trait_param_env;
-
- debug!("compare_impl_method: trait_bounds={:?}",
- infcx.parameter_environment.caller_bounds);
-
- let mut selcx = traits::SelectionContext::new(&infcx);
-
- for predicate in impl_pred.fns {
- let traits::Normalized { value: predicate, .. } =
- traits::normalize(&mut selcx, normalize_cause.clone(), &predicate);
-
- let cause = traits::ObligationCause {
- span: impl_m_span,
- body_id: impl_m_body_id,
- code: traits::ObligationCauseCode::CompareImplMethodObligation
- };
-
- fulfillment_cx.register_predicate_obligation(
- &infcx,
- traits::Obligation::new(cause, predicate));
- }
+ tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|mut infcx| {
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
+
+ // Normalize the associated types in the trait_bounds.
+ let trait_bounds = trait_m.predicates.instantiate(tcx, &trait_to_skol_substs);
+
+ // Create obligations for each predicate declared by the impl
+ // definition in the context of the trait's parameter
+ // environment. We can't just use `impl_env.caller_bounds`,
+ // however, because we want to replace all late-bound regions with
+ // region variables.
+ let impl_bounds =
+ impl_m.predicates.instantiate(tcx, impl_to_skol_substs);
+
+ debug!("compare_impl_method: impl_bounds={:?}", impl_bounds);
+
+ // Obtain the predicate split predicate sets for each.
+ let trait_pred = trait_bounds.predicates.split();
+ let impl_pred = impl_bounds.predicates.split();
+
+ // This is the only tricky bit of the new way we check implementation methods
+ // We need to build a set of predicates where only the FnSpace bounds
+ // are from the trait and we assume all other bounds from the implementation
+ // to be previously satisfied.
+ //
+ // We then register the obligations from the impl_m and check to see
+ // if all constraints hold.
+ let hybrid_preds = VecPerParamSpace::new(
+ impl_pred.types,
+ impl_pred.selfs,
+ trait_pred.fns
+ );
+
+ // Construct trait parameter environment and then shift it into the skolemized viewpoint.
+ // The key step here is to update the caller_bounds's predicates to be
+ // the new hybrid bounds we computed.
+ let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_body_id);
+ let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.into_vec());
+ let trait_param_env = traits::normalize_param_env_or_error(tcx,
+ trait_param_env,
+ normalize_cause.clone());
+ // FIXME(@jroesch) this seems ugly, but is a temporary change
+ infcx.parameter_environment = trait_param_env;
+
+ debug!("compare_impl_method: trait_bounds={:?}",
+ infcx.parameter_environment.caller_bounds);
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+
+ let (impl_pred_fns, _) =
+ infcx.replace_late_bound_regions_with_fresh_var(
+ impl_m_span,
+ infer::HigherRankedType,
+ &ty::Binder(impl_pred.fns));
+ for predicate in impl_pred_fns {
+ let traits::Normalized { value: predicate, .. } =
+ traits::normalize(&mut selcx, normalize_cause.clone(), &predicate);
+
+ let cause = traits::ObligationCause {
+ span: impl_m_span,
+ body_id: impl_m_body_id,
+ code: traits::ObligationCauseCode::CompareImplMethodObligation
+ };
+
+ fulfillment_cx.register_predicate_obligation(
+ &infcx,
+ traits::Obligation::new(cause, predicate));
+ }
- // We now need to check that the signature of the impl method is
- // compatible with that of the trait method. We do this by
- // checking that `impl_fty <: trait_fty`.
- //
- // FIXME. Unfortunately, this doesn't quite work right now because
- // associated type normalization is not integrated into subtype
- // checks. For the comparison to be valid, we need to
- // normalize the associated types in the impl/trait methods
- // first. However, because function types bind regions, just
- // calling `normalize_associated_types_in` would have no effect on
- // any associated types appearing in the fn arguments or return
- // type.
-
- // Compute skolemized form of impl and trait method tys.
- let impl_fty = tcx.mk_fn_ptr(impl_m.fty.clone());
- let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
- let trait_fty = tcx.mk_fn_ptr(trait_m.fty.clone());
- let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
-
- let err = infcx.commit_if_ok(|snapshot| {
- let origin = TypeOrigin::MethodCompatCheck(impl_m_span);
-
- let (impl_sig, _) =
- infcx.replace_late_bound_regions_with_fresh_var(impl_m_span,
- infer::HigherRankedType,
- &impl_m.fty.sig);
- let impl_sig =
- impl_sig.subst(tcx, impl_to_skol_substs);
- let impl_sig =
- assoc::normalize_associated_types_in(&infcx,
- &mut fulfillment_cx,
- impl_m_span,
- impl_m_body_id,
- &impl_sig);
- let impl_fty = tcx.mk_fn_ptr(ty::BareFnTy {
- unsafety: impl_m.fty.unsafety,
- abi: impl_m.fty.abi,
- sig: ty::Binder(impl_sig)
- });
- debug!("compare_impl_method: impl_fty={:?}",
- impl_fty);
-
- let (trait_sig, skol_map) =
- infcx.skolemize_late_bound_regions(&trait_m.fty.sig, snapshot);
- let trait_sig =
- trait_sig.subst(tcx, &trait_to_skol_substs);
- let trait_sig =
- assoc::normalize_associated_types_in(&infcx,
- &mut fulfillment_cx,
- impl_m_span,
- impl_m_body_id,
- &trait_sig);
- let trait_fty = tcx.mk_fn_ptr(ty::BareFnTy {
- unsafety: trait_m.fty.unsafety,
- abi: trait_m.fty.abi,
- sig: ty::Binder(trait_sig)
- });
+ // We now need to check that the signature of the impl method is
+ // compatible with that of the trait method. We do this by
+ // checking that `impl_fty <: trait_fty`.
+ //
+ // FIXME. Unfortunately, this doesn't quite work right now because
+ // associated type normalization is not integrated into subtype
+ // checks. For the comparison to be valid, we need to
+ // normalize the associated types in the impl/trait methods
+ // first. However, because function types bind regions, just
+ // calling `normalize_associated_types_in` would have no effect on
+ // any associated types appearing in the fn arguments or return
+ // type.
+
+ // Compute skolemized form of impl and trait method tys.
+ let impl_fty = tcx.mk_fn_ptr(impl_m.fty);
+ let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
+ let trait_fty = tcx.mk_fn_ptr(trait_m.fty);
+ let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
+
+ let err = infcx.commit_if_ok(|snapshot| {
+ let tcx = infcx.tcx;
+ let origin = TypeOrigin::MethodCompatCheck(impl_m_span);
+
+ let (impl_sig, _) =
+ infcx.replace_late_bound_regions_with_fresh_var(impl_m_span,
+ infer::HigherRankedType,
+ &impl_m.fty.sig);
+ let impl_sig =
+ impl_sig.subst(tcx, impl_to_skol_substs);
+ let impl_sig =
+ assoc::normalize_associated_types_in(&infcx,
+ &mut fulfillment_cx,
+ impl_m_span,
+ impl_m_body_id,
+ &impl_sig);
+ let impl_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: impl_m.fty.unsafety,
+ abi: impl_m.fty.abi,
+ sig: ty::Binder(impl_sig)
+ }));
+ debug!("compare_impl_method: impl_fty={:?}",
+ impl_fty);
+
+ let (trait_sig, skol_map) =
+ infcx.skolemize_late_bound_regions(&trait_m.fty.sig, snapshot);
+ let trait_sig =
+ trait_sig.subst(tcx, &trait_to_skol_substs);
+ let trait_sig =
+ assoc::normalize_associated_types_in(&infcx,
+ &mut fulfillment_cx,
+ impl_m_span,
+ impl_m_body_id,
+ &trait_sig);
+ let trait_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: trait_m.fty.unsafety,
+ abi: trait_m.fty.abi,
+ sig: ty::Binder(trait_sig)
+ }));
+
+ debug!("compare_impl_method: trait_fty={:?}",
+ trait_fty);
- debug!("compare_impl_method: trait_fty={:?}",
- trait_fty);
+ infcx.sub_types(false, origin, impl_fty, trait_fty)?;
- infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty)?;
+ infcx.leak_check(false, &skol_map, snapshot)
+ });
- infcx.leak_check(&skol_map, snapshot)
- });
+ match err {
+ Ok(()) => { }
+ Err(terr) => {
+ debug!("checking trait method for compatibility: impl ty {:?}, trait ty {:?}",
+ impl_fty,
+ trait_fty);
+ span_err!(tcx.sess, impl_m_span, E0053,
+ "method `{}` has an incompatible type for trait: {}",
+ trait_m.name,
+ terr);
+ return;
+ }
+ }
- match err {
- Ok(()) => { }
- Err(terr) => {
- debug!("checking trait method for compatibility: impl ty {:?}, trait ty {:?}",
- impl_fty,
- trait_fty);
- span_err!(tcx.sess, impl_m_span, E0053,
- "method `{}` has an incompatible type for trait: {}",
- trait_m.name,
- terr);
- return;
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ match fulfillment_cx.select_all_or_error(&infcx) {
+ Err(ref errors) => { infcx.report_fulfillment_errors(errors) }
+ Ok(_) => {}
}
- }
- // Check that all obligations are satisfied by the implementation's
- // version.
- match fulfillment_cx.select_all_or_error(&infcx) {
- Err(ref errors) => { traits::report_fulfillment_errors(&infcx, errors) }
- Ok(_) => {}
- }
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters. We have to build up a plausible lifetime
+ // environment based on what we find in the trait. We could also
+ // include the obligations derived from the method argument types,
+ // but I don't think it's necessary -- after all, those are still
+ // in effect when type-checking the body, and all the
+ // where-clauses in the header etc should be implied by the trait
+ // anyway, so it shouldn't be needed there either. Anyway, we can
+ // always add more relations later (it's backwards compat).
+ let mut free_regions = FreeRegionMap::new();
+ free_regions.relate_free_regions_from_predicates(
+ &infcx.parameter_environment.caller_bounds);
+
+ infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id);
+ });
- // Finally, resolve all regions. This catches wily misuses of
- // lifetime parameters. We have to build up a plausible lifetime
- // environment based on what we find in the trait. We could also
- // include the obligations derived from the method argument types,
- // but I don't think it's necessary -- after all, those are still
- // in effect when type-checking the body, and all the
- // where-clauses in the header etc should be implied by the trait
- // anyway, so it shouldn't be needed there either. Anyway, we can
- // always add more relations later (it's backwards compat).
- let mut free_regions = FreeRegionMap::new();
- free_regions.relate_free_regions_from_predicates(tcx,
- &infcx.parameter_environment.caller_bounds);
-
- infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id);
-
- fn check_region_bounds_on_impl_method<'tcx>(tcx: &TyCtxt<'tcx>,
- span: Span,
- impl_m: &ty::Method<'tcx>,
- trait_generics: &ty::Generics<'tcx>,
- impl_generics: &ty::Generics<'tcx>,
- trait_to_skol_substs: &Substs<'tcx>,
- impl_to_skol_substs: &Substs<'tcx>)
- -> bool
+ fn check_region_bounds_on_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ span: Span,
+ impl_m: &ty::Method<'tcx>,
+ trait_generics: &ty::Generics<'tcx>,
+ impl_generics: &ty::Generics<'tcx>,
+ trait_to_skol_substs: &Substs<'tcx>,
+ impl_to_skol_substs: &Substs<'tcx>)
+ -> bool
{
let trait_params = trait_generics.regions.get_slice(subst::FnSpace);
// are zero. Since I don't quite know how to phrase things at
// the moment, give a kind of vague error message.
if trait_params.len() != impl_params.len() {
- span_err!(tcx.sess, span, E0195,
+ span_err!(ccx.tcx.sess, span, E0195,
"lifetime parameters or bounds on method `{}` do \
not match the trait declaration",
impl_m.name);
}
}
-pub fn compare_const_impl<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_c: &ty::AssociatedConst<'tcx>,
- impl_c_span: Span,
- trait_c: &ty::AssociatedConst<'tcx>,
- impl_trait_ref: &ty::TraitRef<'tcx>) {
+pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ impl_c: &ty::AssociatedConst<'tcx>,
+ impl_c_span: Span,
+ trait_c: &ty::AssociatedConst<'tcx>,
+ impl_trait_ref: &ty::TraitRef<'tcx>) {
debug!("compare_const_impl(impl_trait_ref={:?})",
impl_trait_ref);
- let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
- let mut fulfillment_cx = traits::FulfillmentContext::new();
-
- // The below is for the most part highly similar to the procedure
- // for methods above. It is simpler in many respects, especially
- // because we shouldn't really have to deal with lifetimes or
- // predicates. In fact some of this should probably be put into
- // shared functions because of DRY violations...
- let trait_to_impl_substs = &impl_trait_ref.substs;
-
- // Create a parameter environment that represents the implementation's
- // method.
- let impl_c_node_id = tcx.map.as_local_node_id(impl_c.def_id).unwrap();
- let impl_param_env = ty::ParameterEnvironment::for_item(tcx, impl_c_node_id);
-
- // Create mapping from impl to skolemized.
- let impl_to_skol_substs = &impl_param_env.free_substs;
-
- // Create mapping from trait to skolemized.
- let trait_to_skol_substs =
- trait_to_impl_substs
- .subst(tcx, impl_to_skol_substs)
- .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
- impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec());
- debug!("compare_const_impl: trait_to_skol_substs={:?}",
- trait_to_skol_substs);
-
- // Compute skolemized form of impl and trait const tys.
- let impl_ty = impl_c.ty.subst(tcx, impl_to_skol_substs);
- let trait_ty = trait_c.ty.subst(tcx, &trait_to_skol_substs);
-
- let err = infcx.commit_if_ok(|_| {
- let origin = TypeOrigin::Misc(impl_c_span);
-
- // There is no "body" here, so just pass dummy id.
- let impl_ty =
- assoc::normalize_associated_types_in(&infcx,
- &mut fulfillment_cx,
- impl_c_span,
- 0,
- &impl_ty);
-
- debug!("compare_const_impl: impl_ty={:?}",
- impl_ty);
-
- let trait_ty =
- assoc::normalize_associated_types_in(&infcx,
- &mut fulfillment_cx,
- impl_c_span,
- 0,
- &trait_ty);
-
- debug!("compare_const_impl: trait_ty={:?}",
- trait_ty);
-
- infer::mk_subty(&infcx, false, origin, impl_ty, trait_ty)
- });
+ let tcx = ccx.tcx;
+ tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|infcx| {
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
+
+ // The below is for the most part highly similar to the procedure
+ // for methods above. It is simpler in many respects, especially
+ // because we shouldn't really have to deal with lifetimes or
+ // predicates. In fact some of this should probably be put into
+ // shared functions because of DRY violations...
+ let trait_to_impl_substs = &impl_trait_ref.substs;
+
+ // Create a parameter environment that represents the implementation's
+ // method.
+ let impl_c_node_id = tcx.map.as_local_node_id(impl_c.def_id).unwrap();
+ let impl_param_env = ty::ParameterEnvironment::for_item(tcx, impl_c_node_id);
+
+ // Create mapping from impl to skolemized.
+ let impl_to_skol_substs = &impl_param_env.free_substs;
+
+ // Create mapping from trait to skolemized.
+ let trait_to_skol_substs =
+ trait_to_impl_substs
+ .subst(tcx, impl_to_skol_substs).clone()
+ .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
+ impl_to_skol_substs.regions.get_slice(subst::FnSpace).to_vec());
+ debug!("compare_const_impl: trait_to_skol_substs={:?}",
+ trait_to_skol_substs);
+
+ // Compute skolemized form of impl and trait const tys.
+ let impl_ty = impl_c.ty.subst(tcx, impl_to_skol_substs);
+ let trait_ty = trait_c.ty.subst(tcx, &trait_to_skol_substs);
+
+ let err = infcx.commit_if_ok(|_| {
+ let origin = TypeOrigin::Misc(impl_c_span);
+
+ // There is no "body" here, so just pass dummy id.
+ let impl_ty =
+ assoc::normalize_associated_types_in(&infcx,
+ &mut fulfillment_cx,
+ impl_c_span,
+ 0,
+ &impl_ty);
+
+ debug!("compare_const_impl: impl_ty={:?}",
+ impl_ty);
+
+ let trait_ty =
+ assoc::normalize_associated_types_in(&infcx,
+ &mut fulfillment_cx,
+ impl_c_span,
+ 0,
+ &trait_ty);
+
+ debug!("compare_const_impl: trait_ty={:?}",
+ trait_ty);
+
+ infcx.sub_types(false, origin, impl_ty, trait_ty)
+ .map(|InferOk { obligations, .. }| {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty())
+ })
+ });
- match err {
- Ok(InferOk { obligations, .. }) => {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty())
- }
- Err(terr) => {
+ if let Err(terr) = err {
debug!("checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
impl_ty,
trait_ty);
trait: {}",
trait_c.name,
terr);
- return;
}
- }
+ });
}
// except according to those terms.
-use check::{coercion, FnCtxt};
+use check::FnCtxt;
use rustc::ty::Ty;
use rustc::infer::{InferOk, TypeOrigin};
use syntax::codemap::Span;
use rustc::hir;
-// Requires that the two types unify, and prints an error message if
-// they don't.
-pub fn suptype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
- expected: Ty<'tcx>, actual: Ty<'tcx>) {
- let origin = TypeOrigin::Misc(sp);
- match fcx.infcx().sub_types(false, origin, actual, expected) {
- Ok(InferOk { obligations, .. }) => {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- },
- Err(e) => {
- fcx.infcx().report_mismatched_types(origin, expected, actual, e);
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ // Requires that the two types unify, and prints an error message if
+ // they don't.
+ pub fn demand_suptype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ let origin = TypeOrigin::Misc(sp);
+ match self.sub_types(false, origin, actual, expected) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ },
+ Err(e) => {
+ self.report_mismatched_types(origin, expected, actual, e);
+ }
}
}
-}
-pub fn eqtype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
- expected: Ty<'tcx>, actual: Ty<'tcx>) {
- let origin = TypeOrigin::Misc(sp);
- match fcx.infcx().eq_types(false, origin, actual, expected) {
- Ok(InferOk { obligations, .. }) => {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- },
- Err(e) => {
- fcx.infcx().report_mismatched_types(origin, expected, actual, e);
+ pub fn demand_eqtype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ let origin = TypeOrigin::Misc(sp);
+ match self.eq_types(false, origin, actual, expected) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ },
+ Err(e) => {
+ self.report_mismatched_types(origin, expected, actual, e);
+ }
}
}
-}
-// Checks that the type of `expr` can be coerced to `expected`.
-pub fn coerce<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- expected: Ty<'tcx>,
- expr: &hir::Expr) {
- let expected = fcx.resolve_type_vars_if_possible(expected);
- if let Err(e) = coercion::try(fcx, expr, expected) {
- let origin = TypeOrigin::Misc(sp);
- let expr_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(expr));
- fcx.infcx().report_mismatched_types(origin, expected, expr_ty, e);
+ // Checks that the type of `expr` can be coerced to `expected`.
+ pub fn demand_coerce(&self, expr: &hir::Expr, expected: Ty<'tcx>) {
+ let expected = self.resolve_type_vars_with_obligations(expected);
+ if let Err(e) = self.try_coerce(expr, expected) {
+ let origin = TypeOrigin::Misc(expr.span);
+ let expr_ty = self.resolve_type_vars_with_obligations(self.expr_ty(expr));
+ self.report_mismatched_types(origin, expected, expr_ty, e);
+ }
+ }
+
+ pub fn require_same_types(&self, span: Span, t1: Ty<'tcx>, t2: Ty<'tcx>, msg: &str)
+ -> bool {
+ if let Err(err) = self.eq_types(false, TypeOrigin::Misc(span), t1, t2) {
+ let found_ty = self.resolve_type_vars_if_possible(&t1);
+ let expected_ty = self.resolve_type_vars_if_possible(&t2);
+ ::emit_type_err(self.tcx, span, found_ty, expected_ty, &err, msg);
+ false
+ } else {
+ true
+ }
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use check::regionck::{self, Rcx};
+use CrateCtxt;
+use check::regionck::RegionCtxt;
use hir::def_id::DefId;
use middle::free_region::FreeRegionMap;
/// struct/enum definition for the nominal type itself (i.e.
/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
///
-pub fn check_drop_impl(tcx: &TyCtxt, drop_impl_did: DefId) -> Result<(), ()> {
+pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> {
let ty::TypeScheme { generics: ref dtor_generics,
- ty: dtor_self_type } = tcx.lookup_item_type(drop_impl_did);
- let dtor_predicates = tcx.lookup_predicates(drop_impl_did);
+ ty: dtor_self_type } = ccx.tcx.lookup_item_type(drop_impl_did);
+ let dtor_predicates = ccx.tcx.lookup_predicates(drop_impl_did);
match dtor_self_type.sty {
ty::TyEnum(adt_def, self_to_impl_substs) |
ty::TyStruct(adt_def, self_to_impl_substs) => {
- ensure_drop_params_and_item_params_correspond(tcx,
+ ensure_drop_params_and_item_params_correspond(ccx,
drop_impl_did,
dtor_generics,
&dtor_self_type,
adt_def.did)?;
- ensure_drop_predicates_are_implied_by_item_defn(tcx,
+ ensure_drop_predicates_are_implied_by_item_defn(ccx,
drop_impl_did,
&dtor_predicates,
adt_def.did,
_ => {
// Destructors only work on nominal types. This was
// already checked by coherence, so we can panic here.
- let span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
+ let span = ccx.tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
span_bug!(span,
"should have been rejected by coherence check: {}",
dtor_self_type);
}
}
-fn ensure_drop_params_and_item_params_correspond<'tcx>(
- tcx: &TyCtxt<'tcx>,
+fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>(
+ ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
drop_impl_generics: &ty::Generics<'tcx>,
drop_impl_ty: &ty::Ty<'tcx>,
self_type_did: DefId) -> Result<(), ()>
{
+ let tcx = ccx.tcx;
let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap();
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
// check that the impl type can be made to match the trait type.
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id);
- let infcx = infer::new_infer_ctxt(tcx,
- &tcx.tables,
- Some(impl_param_env),
- ProjectionMode::AnyFinal);
- let mut fulfillment_cx = traits::FulfillmentContext::new();
+ tcx.infer_ctxt(None, Some(impl_param_env), ProjectionMode::AnyFinal).enter(|infcx| {
+ let tcx = infcx.tcx;
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
- let named_type = tcx.lookup_item_type(self_type_did).ty;
- let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
+ let named_type = tcx.lookup_item_type(self_type_did).ty;
+ let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
- let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
- let fresh_impl_substs =
- infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics);
- let fresh_impl_self_ty = drop_impl_ty.subst(tcx, &fresh_impl_substs);
-
- if let Err(_) = infer::mk_eqty(&infcx, true, infer::TypeOrigin::Misc(drop_impl_span),
- named_type, fresh_impl_self_ty) {
- let item_span = tcx.map.span(self_type_node_id);
- struct_span_err!(tcx.sess, drop_impl_span, E0366,
- "Implementations of Drop cannot be specialized")
- .span_note(item_span,
- "Use same sequence of generic type and region \
- parameters that is on the struct/enum definition")
- .emit();
- return Err(());
- }
+ let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
+ let fresh_impl_substs =
+ infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics);
+ let fresh_impl_self_ty = drop_impl_ty.subst(tcx, &fresh_impl_substs);
- if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
- // this could be reached when we get lazy normalization
- traits::report_fulfillment_errors(&infcx, errors);
- return Err(());
- }
+ if let Err(_) = infcx.eq_types(true, infer::TypeOrigin::Misc(drop_impl_span),
+ named_type, fresh_impl_self_ty) {
+ let item_span = tcx.map.span(self_type_node_id);
+ struct_span_err!(tcx.sess, drop_impl_span, E0366,
+ "Implementations of Drop cannot be specialized")
+ .span_note(item_span,
+ "Use same sequence of generic type and region \
+ parameters that is on the struct/enum definition")
+ .emit();
+ return Err(());
+ }
+
+ if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) {
+ // this could be reached when we get lazy normalization
+ infcx.report_fulfillment_errors(errors);
+ return Err(());
+ }
if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(&infcx) {
- traits::report_fulfillment_errors_as_warnings(&infcx, errors,
- drop_impl_node_id);
+ infcx.report_fulfillment_errors_as_warnings(errors, drop_impl_node_id);
}
- let free_regions = FreeRegionMap::new();
- infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
- Ok(())
+ let free_regions = FreeRegionMap::new();
+ infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id);
+ Ok(())
+ })
}
/// Confirms that every predicate imposed by dtor_predicates is
/// implied by assuming the predicates attached to self_type_did.
-fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
- tcx: &TyCtxt<'tcx>,
+fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>(
+ ccx: &CrateCtxt<'a, 'tcx>,
drop_impl_did: DefId,
dtor_predicates: &ty::GenericPredicates<'tcx>,
self_type_did: DefId,
// absent. So we report an error that the Drop impl injected a
// predicate that is not present on the struct definition.
+ let tcx = ccx.tcx;
+
let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap();
let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP);
/// ensuring that they do not access data nor invoke methods of
/// values that have been previously dropped).
///
-pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- typ: ty::Ty<'tcx>,
- span: Span,
- scope: region::CodeExtent) {
+pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
+ rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
+ typ: ty::Ty<'tcx>,
+ span: Span,
+ scope: region::CodeExtent)
+{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
typ, scope);
- let parent_scope = rcx.tcx().region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
+ let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
});
match result {
Ok(()) => {}
Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
- let tcx = rcx.tcx();
+ let tcx = rcx.tcx;
let mut err = struct_span_err!(tcx.sess, span, E0320,
"overflow while adding drop-check rules for {}", typ);
match *ctxt {
}
}
-struct DropckContext<'a, 'b: 'a, 'tcx: 'b> {
- rcx: &'a mut Rcx<'b, 'tcx>,
+struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
+ rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
/// types that have already been traversed
breadcrumbs: FnvHashSet<Ty<'tcx>>,
/// span for error reporting
}
// `context` is used for reporting overflow errors
-fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>(
- cx: &mut DropckContext<'a, 'b, 'tcx>,
+fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
+ cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
context: TypeContext,
ty: Ty<'tcx>,
depth: usize) -> Result<(), Error<'tcx>>
{
- let tcx = cx.rcx.tcx();
+ let tcx = cx.rcx.tcx;
// Issue #22443: Watch out for overflow. While we are careful to
// handle regular types properly, non-regular ones cause problems.
let recursion_limit = tcx.sess.recursion_limit.get();
// canoncialize the regions in `ty` before inserting - infinitely many
// region variables can refer to the same region.
- let ty = cx.rcx.infcx().resolve_type_and_region_vars_if_possible(&ty);
+ let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
if !cx.breadcrumbs.insert(ty) {
debug!("iterate_over_potentially_unsafe_regions_in_type \
(0..depth).map(|_| ' ').collect::<String>(),
ty);
- regionck::type_must_outlive(cx.rcx,
- infer::SubregionOrigin::SafeDestructor(cx.span),
- ty,
- ty::ReScope(cx.parent_scope));
+ cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
+ ty, ty::ReScope(cx.parent_scope));
return Ok(());
}
for variant in &def.variants {
for field in variant.fields.iter() {
let fty = field.ty(tcx, substs);
- let fty = cx.rcx.fcx.resolve_type_vars_if_possible(
+ let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
iterate_over_potentially_unsafe_regions_in_type(
cx,
Ok(())
}
- ty::TyTuple(ref tys) |
- ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) => {
+ ty::TyTuple(tys) |
+ ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
}
}
-fn has_dtor_of_interest<'tcx>(tcx: &TyCtxt<'tcx>,
- ty: ty::Ty<'tcx>) -> bool {
+fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyEnum(def, _) | ty::TyStruct(def, _) => {
def.is_dtorck(tcx)
use intrinsics;
use rustc::ty::subst::{self, Substs};
use rustc::ty::FnSig;
-use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::{self, Ty};
use {CrateCtxt, require_same_types};
use std::collections::{HashMap};
use rustc::hir;
-fn equate_intrinsic_type<'a, 'tcx>(tcx: &TyCtxt<'tcx>, it: &hir::ForeignItem,
+fn equate_intrinsic_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ it: &hir::ForeignItem,
n_tps: usize,
abi: Abi,
inputs: Vec<ty::Ty<'tcx>>,
output: ty::FnOutput<'tcx>) {
+ let tcx = ccx.tcx;
let def_id = tcx.map.local_def_id(it.id);
let i_ty = tcx.lookup_item_type(def_id);
let mut substs = Substs::empty();
substs.types = i_ty.generics.types.map(|def| tcx.mk_param_from_def(def));
- let fty = tcx.mk_fn_def(def_id, tcx.mk_substs(substs), ty::BareFnTy {
+ let fty = tcx.mk_fn_def(def_id, tcx.mk_substs(substs),
+ tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: abi,
sig: ty::Binder(FnSig {
output: output,
variadic: false,
}),
- });
+ }));
let i_n_tps = i_ty.generics.types.len(subst::FnSpace);
if i_n_tps != n_tps {
span_err!(tcx.sess, it.span, E0094,
parameters: found {}, expected {}",
i_n_tps, n_tps);
} else {
- require_same_types(tcx,
- None,
- false,
+ require_same_types(ccx,
it.span,
i_ty.ty,
fty,
"try" => {
let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
- let fn_ty = ty::BareFnTy {
+ let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
sig: ty::Binder(FnSig {
output: ty::FnOutput::FnConverging(tcx.mk_nil()),
variadic: false,
}),
- };
+ });
(0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32)
}
};
(n_tps, inputs, ty::FnConverging(output))
};
- equate_intrinsic_type(
- tcx,
- it,
- n_tps,
- Abi::RustIntrinsic,
- inputs,
- output
- )
+ equate_intrinsic_type(ccx, it, n_tps, Abi::RustIntrinsic, inputs, output)
}
/// Type-check `extern "platform-intrinsic" { ... }` functions.
}
let input_pairs = intr.inputs.iter().zip(&sig.inputs);
for (i, (expected_arg, arg)) in input_pairs.enumerate() {
- match_intrinsic_type_to_type(tcx, &format!("argument {}", i + 1), it.span,
+ match_intrinsic_type_to_type(ccx, &format!("argument {}", i + 1), it.span,
&mut structural_to_nomimal, expected_arg, arg);
}
- match_intrinsic_type_to_type(tcx, "return value", it.span,
+ match_intrinsic_type_to_type(ccx, "return value", it.span,
&mut structural_to_nomimal,
&intr.output, sig.output.unwrap());
return
}
};
- equate_intrinsic_type(
- tcx,
- it,
- n_tps,
- Abi::PlatformIntrinsic,
- inputs,
- ty::FnConverging(output)
- )
+ equate_intrinsic_type(ccx, it, n_tps, Abi::PlatformIntrinsic,
+ inputs, ty::FnConverging(output))
}
// walk the expected type and the actual type in lock step, checking they're
// the same, in a kinda-structural way, i.e. `Vector`s have to be simd structs with
// exactly the right element type
fn match_intrinsic_type_to_type<'tcx, 'a>(
- tcx: &TyCtxt<'tcx>,
+ ccx: &CrateCtxt<'a, 'tcx>,
position: &str,
span: Span,
structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>,
use intrinsics::Type::*;
let simple_error = |real: &str, expected: &str| {
- span_err!(tcx.sess, span, E0442,
+ span_err!(ccx.tcx.sess, span, E0442,
"intrinsic {} has wrong type: found {}, expected {}",
position, real, expected)
};
simple_error(&format!("`{}`", t),
if const_ {"const pointer"} else {"mut pointer"})
}
- match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal,
+ match_intrinsic_type_to_type(ccx, position, span, structural_to_nominal,
inner_expected, ty)
}
_ => simple_error(&format!("`{}`", t), "raw pointer"),
simple_error(&format!("non-simd type `{}`", t), "simd type");
return;
}
- let t_len = t.simd_size(tcx);
+ let t_len = t.simd_size(ccx.tcx);
if len as usize != t_len {
simple_error(&format!("vector with length {}", t_len),
&format!("length {}", len));
return;
}
- let t_ty = t.simd_type(tcx);
+ let t_ty = t.simd_type(ccx.tcx);
{
// check that a given structural type always has the same an intrinsic definition
let previous = structural_to_nominal.entry(expected).or_insert(t);
if *previous != t {
// this gets its own error code because it is non-trivial
- span_err!(tcx.sess, span, E0443,
+ span_err!(ccx.tcx.sess, span, E0443,
"intrinsic {} has wrong type: found `{}`, expected `{}` which \
was used for this vector type previously in this signature",
position,
return;
}
}
- match_intrinsic_type_to_type(tcx,
+ match_intrinsic_type_to_type(ccx,
position,
span,
structural_to_nominal,
}
Aggregate(_flatten, ref expected_contents) => {
match t.sty {
- ty::TyTuple(ref contents) => {
+ ty::TyTuple(contents) => {
if contents.len() != expected_contents.len() {
simple_error(&format!("tuple with length {}", contents.len()),
&format!("tuple with length {}", expected_contents.len()));
return
}
for (e, c) in expected_contents.iter().zip(contents) {
- match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal,
+ match_intrinsic_type_to_type(ccx, position, span, structural_to_nominal,
e, c)
}
}
use super::probe;
-use check::{self, FnCtxt, callee, demand};
+use check::{FnCtxt, callee};
use check::UnresolvedTypeAction;
use hir::def_id::DefId;
use rustc::ty::subst::{self};
use rustc::traits;
-use rustc::ty::{self, NoPreference, PreferMutLvalue, Ty, TyCtxt};
+use rustc::ty::{self, NoPreference, PreferMutLvalue, Ty};
use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
use rustc::ty::fold::TypeFoldable;
-use rustc::infer;
-use rustc::infer::{InferCtxt, TypeOrigin};
+use rustc::infer::{self, InferOk, TypeOrigin};
use syntax::codemap::Span;
use rustc::hir;
-struct ConfirmContext<'a, 'tcx:'a> {
- fcx: &'a FnCtxt<'a, 'tcx>,
+use std::ops::Deref;
+
+struct ConfirmContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a>{
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
- self_expr: &'tcx hir::Expr,
- call_expr: &'tcx hir::Expr,
+ self_expr: &'gcx hir::Expr,
+ call_expr: &'gcx hir::Expr,
+}
+
+impl<'a, 'gcx, 'tcx> Deref for ConfirmContext<'a, 'gcx, 'tcx> {
+ type Target = FnCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.fcx
+ }
}
struct InstantiatedMethodSig<'tcx> {
method_predicates: ty::InstantiatedPredicates<'tcx>,
}
-pub fn confirm<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- self_expr: &'tcx hir::Expr,
- call_expr: &'tcx hir::Expr,
- unadjusted_self_ty: Ty<'tcx>,
- pick: probe::Pick<'tcx>,
- supplied_method_types: Vec<Ty<'tcx>>)
- -> ty::MethodCallee<'tcx>
-{
- debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})",
- unadjusted_self_ty,
- pick,
- supplied_method_types);
-
- let mut confirm_cx = ConfirmContext::new(fcx, span, self_expr, call_expr);
- confirm_cx.confirm(unadjusted_self_ty, pick, supplied_method_types)
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn confirm_method(&self,
+ span: Span,
+ self_expr: &'gcx hir::Expr,
+ call_expr: &'gcx hir::Expr,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: probe::Pick<'tcx>,
+ supplied_method_types: Vec<Ty<'tcx>>)
+ -> ty::MethodCallee<'tcx>
+ {
+ debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})",
+ unadjusted_self_ty,
+ pick,
+ supplied_method_types);
+
+ let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr);
+ confirm_cx.confirm(unadjusted_self_ty, pick, supplied_method_types)
+ }
}
-impl<'a,'tcx> ConfirmContext<'a,'tcx> {
- fn new(fcx: &'a FnCtxt<'a, 'tcx>,
+impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> {
+ fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
- self_expr: &'tcx hir::Expr,
- call_expr: &'tcx hir::Expr)
- -> ConfirmContext<'a, 'tcx>
+ self_expr: &'gcx hir::Expr,
+ call_expr: &'gcx hir::Expr)
+ -> ConfirmContext<'a, 'gcx, 'tcx>
{
ConfirmContext { fcx: fcx, span: span, self_expr: self_expr, call_expr: call_expr }
}
let InstantiatedMethodSig {
method_sig, all_substs, method_predicates
} = self.instantiate_method_sig(&pick, all_substs);
- let all_substs = self.tcx().mk_substs(all_substs);
+ let all_substs = self.tcx.mk_substs(all_substs);
let method_self_ty = method_sig.inputs[0];
// Unify the (adjusted) self type with what the method expects.
// Create the method type
let def_id = pick.item.def_id();
let method_ty = pick.item.as_opt_method().unwrap();
- let fty = self.tcx().mk_fn_def(def_id, all_substs, ty::BareFnTy {
+ let fty = self.tcx.mk_fn_def(def_id, all_substs,
+ self.tcx.mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(method_sig),
unsafety: method_ty.fty.unsafety,
abi: method_ty.fty.abi.clone(),
- });
+ }));
// Add any trait/regions obligations specified on the method's type parameters.
self.add_obligations(fty, all_substs, &method_predicates);
-> Ty<'tcx>
{
let (autoref, unsize) = if let Some(mutbl) = pick.autoref {
- let region = self.infcx().next_region_var(infer::Autoref(self.span));
- let autoref = AutoPtr(self.tcx().mk_region(region), mutbl);
+ let region = self.next_region_var(infer::Autoref(self.span));
+ let autoref = AutoPtr(self.tcx.mk_region(region), mutbl);
(Some(autoref), pick.unsize.map(|target| {
- target.adjust_for_autoref(self.tcx(), Some(autoref))
+ target.adjust_for_autoref(self.tcx, Some(autoref))
}))
} else {
// No unsizing should be performed without autoref (at
// Commit the autoderefs by calling `autoderef again, but this
// time writing the results into the various tables.
- let (autoderefd_ty, n, result) = check::autoderef(self.fcx,
- self.span,
- unadjusted_self_ty,
- || Some(self.self_expr),
- UnresolvedTypeAction::Error,
- NoPreference,
- |_, n| {
+ let (autoderefd_ty, n, result) = self.autoderef(self.span,
+ unadjusted_self_ty,
+ || Some(self.self_expr),
+ UnresolvedTypeAction::Error,
+ NoPreference,
+ |_, n| {
if n == pick.autoderefs {
Some(())
} else {
assert_eq!(result, Some(()));
// Write out the final adjustment.
- self.fcx.write_adjustment(self.self_expr.id,
- AdjustDerefRef(AutoDerefRef {
+ self.write_adjustment(self.self_expr.id, AdjustDerefRef(AutoDerefRef {
autoderefs: pick.autoderefs,
autoref: autoref,
unsize: unsize
if let Some(target) = unsize {
target
} else {
- autoderefd_ty.adjust_for_autoref(self.tcx(), autoref)
+ autoderefd_ty.adjust_for_autoref(self.tcx, autoref)
}
}
match pick.kind {
probe::InherentImplPick => {
let impl_def_id = pick.item.container().id();
- assert!(self.tcx().impl_trait_ref(impl_def_id).is_none(),
+ assert!(self.tcx.impl_trait_ref(impl_def_id).is_none(),
"impl {:?} is not an inherent impl", impl_def_id);
- check::impl_self_ty(self.fcx, self.span, impl_def_id).substs
+ self.impl_self_ty(self.span, impl_def_id).substs
}
probe::ObjectPick => {
// been ruled out when we deemed the trait to be
// "object safe".
let original_poly_trait_ref =
- data.principal_trait_ref_with_self_ty(this.tcx(), object_ty);
+ data.principal_trait_ref_with_self_ty(this.tcx, object_ty);
let upcast_poly_trait_ref =
this.upcast(original_poly_trait_ref.clone(), trait_def_id);
let upcast_trait_ref =
// respectively, then we want to return the type
// parameters from the trait ([$A,$B]), not those from
// the impl ([$A,$B,$C]) not the receiver type ([$C]).
- let impl_polytype = check::impl_self_ty(self.fcx, self.span, impl_def_id);
+ let impl_polytype = self.impl_self_ty(self.span, impl_def_id);
let impl_trait_ref =
- self.fcx.instantiate_type_scheme(
+ self.instantiate_type_scheme(
self.span,
&impl_polytype.substs,
- &self.tcx().impl_trait_ref(impl_def_id).unwrap());
+ &self.tcx.impl_trait_ref(impl_def_id).unwrap());
impl_trait_ref.substs.clone()
}
probe::TraitPick => {
let trait_def_id = pick.item.container().id();
- let trait_def = self.tcx().lookup_trait_def(trait_def_id);
+ let trait_def = self.tcx.lookup_trait_def(trait_def_id);
// Make a trait reference `$0 : Trait<$1...$n>`
// consisting entirely of type variables. Later on in
// the process we will unify the transformed-self-type
// of the method with the actual type in order to
// unify some of these variables.
- self.infcx().fresh_substs_for_trait(self.span,
- &trait_def.generics,
- self.infcx().next_ty_var())
+ self.fresh_substs_for_trait(self.span,
+ &trait_def.generics,
+ self.next_ty_var())
}
probe::WhereClausePick(ref poly_trait_ref) => {
}
fn extract_trait_ref<R, F>(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R where
- F: FnMut(&mut ConfirmContext<'a, 'tcx>, Ty<'tcx>, &ty::TraitTy<'tcx>) -> R,
+ F: FnMut(&mut ConfirmContext<'a, 'gcx, 'tcx>, Ty<'tcx>, &ty::TraitTy<'tcx>) -> R,
{
// If we specified that this is an object method, then the
// self-type ought to be something that can be dereferenced to
// yield an object-type (e.g., `&Object` or `Box<Object>`
// etc).
- let (_, _, result) = check::autoderef(self.fcx,
- self.span,
- self_ty,
- || None,
- UnresolvedTypeAction::Error,
- NoPreference,
- |ty, _| {
+ let (_, _, result) = self.fcx.autoderef(self.span,
+ self_ty,
+ || None,
+ UnresolvedTypeAction::Error,
+ NoPreference,
+ |ty, _| {
match ty.sty {
ty::TyTrait(ref data) => Some(closure(self, ty, &data)),
_ => None,
//
// FIXME -- permit users to manually specify lifetimes
let method_regions =
- self.fcx.infcx().region_vars_for_defs(
+ self.region_vars_for_defs(
self.span,
pick.item.as_opt_method().unwrap()
.generics.regions.get_slice(subst::FnSpace));
let mut final_substs = subst::Substs { types: types, regions: regions };
if num_supplied_types == 0 {
- self.fcx.infcx().type_vars_for_defs(
+ self.type_vars_for_defs(
self.span,
subst::FnSpace,
&mut final_substs,
method_types);
} else if num_method_types == 0 {
- span_err!(self.tcx().sess, self.span, E0035,
+ span_err!(self.tcx.sess, self.span, E0035,
"does not take type parameters");
- self.fcx.infcx().type_vars_for_defs(
+ self.type_vars_for_defs(
self.span,
subst::FnSpace,
&mut final_substs,
method_types);
} else if num_supplied_types != num_method_types {
- span_err!(self.tcx().sess, self.span, E0036,
+ span_err!(self.tcx.sess, self.span, E0036,
"incorrect number of type parameters given for this method: expected {}, found {}",
num_method_types, num_supplied_types);
final_substs.types.replace(
subst::FnSpace,
- vec![self.tcx().types.err; num_method_types]);
+ vec![self.tcx.types.err; num_method_types]);
} else {
final_substs.types.replace(subst::FnSpace, supplied_method_types);
}
self_ty: Ty<'tcx>,
method_self_ty: Ty<'tcx>)
{
- match self.fcx.mk_subty(false, TypeOrigin::Misc(self.span), self_ty, method_self_ty) {
- Ok(_) => {}
+ match self.sub_types(false, TypeOrigin::Misc(self.span),
+ self_ty, method_self_ty) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ }
Err(_) => {
span_bug!(
self.span,
// type/early-bound-regions substitutions performed. There can
// be no late-bound regions appearing here.
let method_predicates = pick.item.as_opt_method().unwrap()
- .predicates.instantiate(self.tcx(), &all_substs);
- let method_predicates = self.fcx.normalize_associated_types_in(self.span,
- &method_predicates);
+ .predicates.instantiate(self.tcx, &all_substs);
+ let method_predicates = self.normalize_associated_types_in(self.span,
+ &method_predicates);
debug!("method_predicates after subst = {:?}",
method_predicates);
debug!("late-bound lifetimes from method instantiated, method_sig={:?}",
method_sig);
- let method_sig = self.fcx.instantiate_type_scheme(self.span, &all_substs, &method_sig);
+ let method_sig = self.instantiate_type_scheme(self.span, &all_substs, &method_sig);
debug!("type scheme substituted, method_sig={:?}",
method_sig);
all_substs,
method_predicates);
- self.fcx.add_obligations_for_parameters(
- traits::ObligationCause::misc(self.span, self.fcx.body_id),
+ self.add_obligations_for_parameters(
+ traits::ObligationCause::misc(self.span, self.body_id),
method_predicates);
// this is a projection from a trait reference, so we have to
// make sure that the trait reference inputs are well-formed.
- self.fcx.add_wf_bounds(
- all_substs,
- self.call_expr);
+ self.add_wf_bounds(all_substs, self.call_expr);
// the function type must also be well-formed (this is not
// implied by the substs being well-formed because of inherent
// impls and late-bound regions - see issue #28609).
- self.fcx.register_wf_obligation(fty, self.span, traits::MiscObligation);
+ self.register_wf_obligation(fty, self.span, traits::MiscObligation);
}
///////////////////////////////////////////////////////////////////////////
// Fix up autoderefs and derefs.
for (i, &expr) in exprs.iter().rev().enumerate() {
// Count autoderefs.
- let autoderef_count = match self.fcx
- .inh
- .tables
+ let autoderef_count = match self.tables
.borrow()
.adjustments
.get(&expr.id) {
i, expr, autoderef_count);
if autoderef_count > 0 {
- check::autoderef(self.fcx,
- expr.span,
- self.fcx.expr_ty(expr),
- || Some(expr),
- UnresolvedTypeAction::Error,
- PreferMutLvalue,
- |_, autoderefs| {
- if autoderefs == autoderef_count + 1 {
- Some(())
- } else {
- None
- }
- });
+ self.autoderef(expr.span,
+ self.expr_ty(expr),
+ || Some(expr),
+ UnresolvedTypeAction::Error,
+ PreferMutLvalue,
+ |_, autoderefs| {
+ if autoderefs == autoderef_count + 1 {
+ Some(())
+ } else {
+ None
+ }
+ });
}
// Don't retry the first one or we might infinite loop!
// expects. This is annoying and horrible. We
// ought to recode this routine so it doesn't
// (ab)use the normal type checking paths.
- let adj = self.fcx.inh.tables.borrow().adjustments.get(&base_expr.id)
- .cloned();
+ let adj = self.tables.borrow().adjustments.get(&base_expr.id).cloned();
let (autoderefs, unsize) = match adj {
Some(AdjustDerefRef(adr)) => match adr.autoref {
None => {
let (adjusted_base_ty, unsize) = if let Some(target) = unsize {
(target, true)
} else {
- (self.fcx.adjust_expr_ty(base_expr,
+ (self.adjust_expr_ty(base_expr,
Some(&AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: None,
unsize: None
}))), false)
};
- let index_expr_ty = self.fcx.expr_ty(&index_expr);
+ let index_expr_ty = self.expr_ty(&index_expr);
- let result = check::try_index_step(
- self.fcx,
+ let result = self.try_index_step(
ty::MethodCall::expr(expr.id),
expr,
&base_expr,
index_expr_ty);
if let Some((input_ty, return_ty)) = result {
- demand::suptype(self.fcx, index_expr.span, input_ty, index_expr_ty);
+ self.demand_suptype(index_expr.span, input_ty, index_expr_ty);
- let expr_ty = self.fcx.expr_ty(&expr);
- demand::suptype(self.fcx, expr.span, expr_ty, return_ty);
+ let expr_ty = self.expr_ty(&expr);
+ self.demand_suptype(expr.span, expr_ty, return_ty);
}
}
hir::ExprUnary(hir::UnDeref, ref base_expr) => {
// if this is an overloaded deref, then re-evaluate with
// a preference for mut
let method_call = ty::MethodCall::expr(expr.id);
- if self.fcx.inh.tables.borrow().method_map.contains_key(&method_call) {
- let method = check::try_overloaded_deref(
- self.fcx,
- expr.span,
+ if self.tables.borrow().method_map.contains_key(&method_call) {
+ let method = self.try_overloaded_deref(expr.span,
Some(&base_expr),
- self.fcx.expr_ty(&base_expr),
+ self.expr_ty(&base_expr),
PreferMutLvalue);
let method = method.expect("re-trying deref failed");
- self.fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
+ self.tables.borrow_mut().method_map.insert(method_call, method);
}
}
_ => {}
///////////////////////////////////////////////////////////////////////////
// MISCELLANY
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
- self.fcx.tcx()
- }
-
- fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> {
- self.fcx.infcx()
- }
-
fn enforce_illegal_method_limitations(&self, pick: &probe::Pick) {
// Disallow calls to the method `drop` defined in the `Drop` trait.
match pick.item.container() {
ty::TraitContainer(trait_def_id) => {
- callee::check_legal_trait_for_method_call(self.fcx.ccx, self.span, trait_def_id)
+ callee::check_legal_trait_for_method_call(self.ccx, self.span, trait_def_id)
}
ty::ImplContainer(..) => {}
}
target_trait_def_id: DefId)
-> ty::PolyTraitRef<'tcx>
{
- let upcast_trait_refs = traits::upcast(self.tcx(),
- source_trait_ref.clone(),
- target_trait_def_id);
+ let upcast_trait_refs = self.tcx.upcast_choices(source_trait_ref.clone(),
+ target_trait_def_id);
// must be exactly one trait ref or we'd get an ambig error etc
if upcast_trait_refs.len() != 1 {
fn replace_late_bound_regions_with_fresh_var<T>(&self, value: &ty::Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- self.infcx().replace_late_bound_regions_with_fresh_var(
+ self.fcx.replace_late_bound_regions_with_fresh_var(
self.span, infer::FnCall, value).0
}
}
use hir::def_id::DefId;
use rustc::ty::subst;
use rustc::traits;
-use rustc::ty::{self, TyCtxt, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable};
+use rustc::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable};
use rustc::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr};
use rustc::infer;
pub use self::MethodError::*;
pub use self::CandidateSource::*;
-pub use self::suggest::{report_error, AllTraitsVec};
+pub use self::suggest::AllTraitsVec;
mod confirm;
mod probe;
TraitSource(/* trait id */ DefId),
}
-/// Determines whether the type `self_ty` supports a method name `method_name` or not.
-pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- method_name: ast::Name,
- self_ty: ty::Ty<'tcx>,
- call_expr_id: ast::NodeId)
- -> bool
-{
- let mode = probe::Mode::MethodCall;
- match probe::probe(fcx, span, mode, method_name, self_ty, call_expr_id) {
- Ok(..) => true,
- Err(NoMatch(..)) => false,
- Err(Ambiguity(..)) => true,
- Err(ClosureAmbiguity(..)) => true,
- Err(PrivateMatch(..)) => true,
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ /// Determines whether the type `self_ty` supports a method name `method_name` or not.
+ pub fn method_exists(&self,
+ span: Span,
+ method_name: ast::Name,
+ self_ty: ty::Ty<'tcx>,
+ call_expr_id: ast::NodeId)
+ -> bool
+ {
+ let mode = probe::Mode::MethodCall;
+ match self.probe_method(span, mode, method_name, self_ty, call_expr_id) {
+ Ok(..) => true,
+ Err(NoMatch(..)) => false,
+ Err(Ambiguity(..)) => true,
+ Err(ClosureAmbiguity(..)) => true,
+ Err(PrivateMatch(..)) => true,
+ }
}
-}
-/// Performs method lookup. If lookup is successful, it will return the callee and store an
-/// appropriate adjustment for the self-expr. In some cases it may report an error (e.g., invoking
-/// the `drop` method).
-///
-/// # Arguments
-///
-/// Given a method call like `foo.bar::<T1,...Tn>(...)`:
-///
-/// * `fcx`: the surrounding `FnCtxt` (!)
-/// * `span`: the span for the method call
-/// * `method_name`: the name of the method being called (`bar`)
-/// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
-/// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
-/// * `self_expr`: the self expression (`foo`)
-pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- method_name: ast::Name,
- self_ty: ty::Ty<'tcx>,
- supplied_method_types: Vec<ty::Ty<'tcx>>,
- call_expr: &'tcx hir::Expr,
- self_expr: &'tcx hir::Expr)
- -> Result<ty::MethodCallee<'tcx>, MethodError<'tcx>>
-{
- debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
- method_name,
- self_ty,
- call_expr,
- self_expr);
-
- let mode = probe::Mode::MethodCall;
- let self_ty = fcx.infcx().resolve_type_vars_if_possible(&self_ty);
- let pick = probe::probe(fcx, span, mode, method_name, self_ty, call_expr.id)?;
-
- if let Some(import_id) = pick.import_id {
- fcx.tcx().used_trait_imports.borrow_mut().insert(import_id);
- }
+ /// Performs method lookup. If lookup is successful, it will return the callee
+ /// and store an appropriate adjustment for the self-expr. In some cases it may
+ /// report an error (e.g., invoking the `drop` method).
+ ///
+ /// # Arguments
+ ///
+ /// Given a method call like `foo.bar::<T1,...Tn>(...)`:
+ ///
+    /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `span`: the span for the method call
+ /// * `method_name`: the name of the method being called (`bar`)
+ /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
+ /// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
+ /// * `self_expr`: the self expression (`foo`)
+ pub fn lookup_method(&self,
+ span: Span,
+ method_name: ast::Name,
+ self_ty: ty::Ty<'tcx>,
+ supplied_method_types: Vec<ty::Ty<'tcx>>,
+ call_expr: &'gcx hir::Expr,
+ self_expr: &'gcx hir::Expr)
+ -> Result<ty::MethodCallee<'tcx>, MethodError<'tcx>>
+ {
+ debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
+ method_name,
+ self_ty,
+ call_expr,
+ self_expr);
+
+ let mode = probe::Mode::MethodCall;
+ let self_ty = self.resolve_type_vars_if_possible(&self_ty);
+ let pick = self.probe_method(span, mode, method_name, self_ty, call_expr.id)?;
+
+ if let Some(import_id) = pick.import_id {
+ self.tcx.used_trait_imports.borrow_mut().insert(import_id);
+ }
- Ok(confirm::confirm(fcx, span, self_expr, call_expr, self_ty, pick, supplied_method_types))
-}
+ Ok(self.confirm_method(span, self_expr, call_expr, self_ty, pick, supplied_method_types))
+ }
-pub fn lookup_in_trait<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- self_expr: Option<&hir::Expr>,
- m_name: ast::Name,
- trait_def_id: DefId,
- self_ty: ty::Ty<'tcx>,
- opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
- -> Option<ty::MethodCallee<'tcx>>
-{
- lookup_in_trait_adjusted(fcx, span, self_expr, m_name, trait_def_id,
- 0, false, self_ty, opt_input_types)
-}
+    /// Convenience wrapper around `lookup_method_in_trait_adjusted` for the
+    /// common case: no autoderefs (`autoderefs = 0`) and no unsizing
+    /// (`unsize = false`) applied to the self expression.
+    pub fn lookup_method_in_trait(&self,
+                                  span: Span,
+                                  self_expr: Option<&hir::Expr>,
+                                  m_name: ast::Name,
+                                  trait_def_id: DefId,
+                                  self_ty: ty::Ty<'tcx>,
+                                  opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
+                                  -> Option<ty::MethodCallee<'tcx>>
+    {
+        self.lookup_method_in_trait_adjusted(span, self_expr, m_name, trait_def_id,
+                                             0, false, self_ty, opt_input_types)
+    }
-/// `lookup_in_trait_adjusted` is used for overloaded operators. It does a very narrow slice of
-/// what the normal probe/confirm path does. In particular, it doesn't really do any probing: it
-/// simply constructs an obligation for a particular trait with the given self-type and checks
-/// whether that trait is implemented.
-///
-/// FIXME(#18741) -- It seems likely that we can consolidate some of this code with the other
-/// method-lookup code. In particular, autoderef on index is basically identical to autoderef with
-/// normal probes, except that the test also looks for built-in indexing. Also, the second half of
-/// this method is basically the same as confirmation.
-pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- self_expr: Option<&hir::Expr>,
- m_name: ast::Name,
- trait_def_id: DefId,
- autoderefs: usize,
- unsize: bool,
- self_ty: ty::Ty<'tcx>,
- opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
- -> Option<ty::MethodCallee<'tcx>>
-{
- debug!("lookup_in_trait_adjusted(self_ty={:?}, self_expr={:?}, m_name={}, trait_def_id={:?})",
- self_ty,
- self_expr,
- m_name,
- trait_def_id);
-
- let trait_def = fcx.tcx().lookup_trait_def(trait_def_id);
-
- let type_parameter_defs = trait_def.generics.types.get_slice(subst::TypeSpace);
- let expected_number_of_input_types = type_parameter_defs.len();
-
- assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0);
- assert!(trait_def.generics.regions.is_empty());
-
- // Construct a trait-reference `self_ty : Trait<input_tys>`
- let mut substs = subst::Substs::new_trait(Vec::new(), Vec::new(), self_ty);
-
- match opt_input_types {
- Some(input_types) => {
- assert_eq!(expected_number_of_input_types, input_types.len());
- substs.types.replace(subst::ParamSpace::TypeSpace, input_types);
- }
+ /// `lookup_in_trait_adjusted` is used for overloaded operators.
+ /// It does a very narrow slice of what the normal probe/confirm path does.
+ /// In particular, it doesn't really do any probing: it simply constructs
+    /// an obligation for a particular trait with the given self-type and checks
+ /// whether that trait is implemented.
+ ///
+ /// FIXME(#18741) -- It seems likely that we can consolidate some of this
+ /// code with the other method-lookup code. In particular, autoderef on
+ /// index is basically identical to autoderef with normal probes, except
+ /// that the test also looks for built-in indexing. Also, the second half of
+ /// this method is basically the same as confirmation.
+ pub fn lookup_method_in_trait_adjusted(&self,
+ span: Span,
+ self_expr: Option<&hir::Expr>,
+ m_name: ast::Name,
+ trait_def_id: DefId,
+ autoderefs: usize,
+ unsize: bool,
+ self_ty: ty::Ty<'tcx>,
+ opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
+ -> Option<ty::MethodCallee<'tcx>>
+ {
+ debug!("lookup_in_trait_adjusted(self_ty={:?}, self_expr={:?}, \
+ m_name={}, trait_def_id={:?})",
+ self_ty,
+ self_expr,
+ m_name,
+ trait_def_id);
+
+ let trait_def = self.tcx.lookup_trait_def(trait_def_id);
+
+ let type_parameter_defs = trait_def.generics.types.get_slice(subst::TypeSpace);
+ let expected_number_of_input_types = type_parameter_defs.len();
+
+ assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0);
+ assert!(trait_def.generics.regions.is_empty());
+
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let mut substs = subst::Substs::new_trait(Vec::new(), Vec::new(), self_ty);
+
+ match opt_input_types {
+ Some(input_types) => {
+ assert_eq!(expected_number_of_input_types, input_types.len());
+ substs.types.replace(subst::ParamSpace::TypeSpace, input_types);
+ }
- None => {
- fcx.inh.infcx.type_vars_for_defs(
- span,
- subst::ParamSpace::TypeSpace,
- &mut substs,
- type_parameter_defs);
+ None => {
+ self.type_vars_for_defs(
+ span,
+ subst::ParamSpace::TypeSpace,
+ &mut substs,
+ type_parameter_defs);
+ }
}
- }
- let trait_ref = ty::TraitRef::new(trait_def_id, fcx.tcx().mk_substs(substs));
+ let trait_ref = ty::TraitRef::new(trait_def_id, self.tcx.mk_substs(substs));
- // Construct an obligation
- let poly_trait_ref = trait_ref.to_poly_trait_ref();
- let obligation = traits::Obligation::misc(span,
- fcx.body_id,
- poly_trait_ref.to_predicate());
-
- // Now we want to know if this can be matched
- let mut selcx = traits::SelectionContext::new(fcx.infcx());
- if !selcx.evaluate_obligation(&obligation) {
- debug!("--> Cannot match obligation");
- return None; // Cannot be matched, no such method resolution is possible.
- }
+ // Construct an obligation
+ let poly_trait_ref = trait_ref.to_poly_trait_ref();
+ let obligation = traits::Obligation::misc(span,
+ self.body_id,
+ poly_trait_ref.to_predicate());
- // Trait must have a method named `m_name` and it should not have
- // type parameters or early-bound regions.
- let tcx = fcx.tcx();
- let method_item = trait_item(tcx, trait_def_id, m_name).unwrap();
- let method_ty = method_item.as_opt_method().unwrap();
- assert_eq!(method_ty.generics.types.len(subst::FnSpace), 0);
- assert_eq!(method_ty.generics.regions.len(subst::FnSpace), 0);
-
- debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}",
- method_item, method_ty);
-
- // Instantiate late-bound regions and substitute the trait
- // parameters into the method type to get the actual method type.
- //
- // NB: Instantiate late-bound regions first so that
- // `instantiate_type_scheme` can normalize associated types that
- // may reference those regions.
- let fn_sig = fcx.infcx().replace_late_bound_regions_with_fresh_var(span,
- infer::FnCall,
- &method_ty.fty.sig).0;
- let fn_sig = fcx.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
- let transformed_self_ty = fn_sig.inputs[0];
- let def_id = method_item.def_id();
- let fty = tcx.mk_fn_def(def_id, trait_ref.substs, ty::BareFnTy {
- sig: ty::Binder(fn_sig),
- unsafety: method_ty.fty.unsafety,
- abi: method_ty.fty.abi.clone(),
- });
-
- debug!("lookup_in_trait_adjusted: matched method fty={:?} obligation={:?}",
- fty,
- obligation);
-
- // Register obligations for the parameters. This will include the
- // `Self` parameter, which in turn has a bound of the main trait,
- // so this also effectively registers `obligation` as well. (We
- // used to register `obligation` explicitly, but that resulted in
- // double error messages being reported.)
- //
- // Note that as the method comes from a trait, it should not have
- // any late-bound regions appearing in its bounds.
- let method_bounds = fcx.instantiate_bounds(span, trait_ref.substs, &method_ty.predicates);
- assert!(!method_bounds.has_escaping_regions());
- fcx.add_obligations_for_parameters(
- traits::ObligationCause::misc(span, fcx.body_id),
- &method_bounds);
-
- // Also register an obligation for the method type being well-formed.
- fcx.register_wf_obligation(fty, span, traits::MiscObligation);
-
- // FIXME(#18653) -- Try to resolve obligations, giving us more
- // typing information, which can sometimes be needed to avoid
- // pathological region inference failures.
- fcx.select_obligations_where_possible();
-
- // Insert any adjustments needed (always an autoref of some mutability).
- match self_expr {
- None => { }
-
- Some(self_expr) => {
- debug!("lookup_in_trait_adjusted: inserting adjustment if needed \
- (self-id={}, autoderefs={}, unsize={}, explicit_self={:?})",
- self_expr.id, autoderefs, unsize,
- method_ty.explicit_self);
-
- match method_ty.explicit_self {
- ty::ExplicitSelfCategory::ByValue => {
- // Trait method is fn(self), no transformation needed.
- assert!(!unsize);
- fcx.write_autoderef_adjustment(self_expr.id, autoderefs);
- }
+ // Now we want to know if this can be matched
+ let mut selcx = traits::SelectionContext::new(self);
+ if !selcx.evaluate_obligation(&obligation) {
+ debug!("--> Cannot match obligation");
+ return None; // Cannot be matched, no such method resolution is possible.
+ }
- ty::ExplicitSelfCategory::ByReference(..) => {
- // Trait method is fn(&self) or fn(&mut self), need an
- // autoref. Pull the region etc out of the type of first argument.
- match transformed_self_ty.sty {
- ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ }) => {
- fcx.write_adjustment(self_expr.id,
- AdjustDerefRef(AutoDerefRef {
- autoderefs: autoderefs,
- autoref: Some(AutoPtr(region, mutbl)),
- unsize: if unsize {
- Some(transformed_self_ty)
- } else {
- None
- }
- }));
- }
+ // Trait must have a method named `m_name` and it should not have
+ // type parameters or early-bound regions.
+ let tcx = self.tcx;
+ let method_item = self.trait_item(trait_def_id, m_name).unwrap();
+ let method_ty = method_item.as_opt_method().unwrap();
+ assert_eq!(method_ty.generics.types.len(subst::FnSpace), 0);
+ assert_eq!(method_ty.generics.regions.len(subst::FnSpace), 0);
+
+ debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}",
+ method_item, method_ty);
+
+ // Instantiate late-bound regions and substitute the trait
+ // parameters into the method type to get the actual method type.
+ //
+ // NB: Instantiate late-bound regions first so that
+ // `instantiate_type_scheme` can normalize associated types that
+ // may reference those regions.
+ let fn_sig = self.replace_late_bound_regions_with_fresh_var(span,
+ infer::FnCall,
+ &method_ty.fty.sig).0;
+ let fn_sig = self.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
+ let transformed_self_ty = fn_sig.inputs[0];
+ let def_id = method_item.def_id();
+ let fty = tcx.mk_fn_def(def_id, trait_ref.substs,
+ tcx.mk_bare_fn(ty::BareFnTy {
+ sig: ty::Binder(fn_sig),
+ unsafety: method_ty.fty.unsafety,
+ abi: method_ty.fty.abi.clone(),
+ }));
+
+ debug!("lookup_in_trait_adjusted: matched method fty={:?} obligation={:?}",
+ fty,
+ obligation);
+
+ // Register obligations for the parameters. This will include the
+ // `Self` parameter, which in turn has a bound of the main trait,
+ // so this also effectively registers `obligation` as well. (We
+ // used to register `obligation` explicitly, but that resulted in
+ // double error messages being reported.)
+ //
+ // Note that as the method comes from a trait, it should not have
+ // any late-bound regions appearing in its bounds.
+ let method_bounds = self.instantiate_bounds(span, trait_ref.substs, &method_ty.predicates);
+ assert!(!method_bounds.has_escaping_regions());
+ self.add_obligations_for_parameters(
+ traits::ObligationCause::misc(span, self.body_id),
+ &method_bounds);
+
+ // Also register an obligation for the method type being well-formed.
+ self.register_wf_obligation(fty, span, traits::MiscObligation);
+
+ // FIXME(#18653) -- Try to resolve obligations, giving us more
+ // typing information, which can sometimes be needed to avoid
+ // pathological region inference failures.
+ self.select_obligations_where_possible();
+
+ // Insert any adjustments needed (always an autoref of some mutability).
+ match self_expr {
+ None => { }
+
+ Some(self_expr) => {
+ debug!("lookup_in_trait_adjusted: inserting adjustment if needed \
+ (self-id={}, autoderefs={}, unsize={}, explicit_self={:?})",
+ self_expr.id, autoderefs, unsize,
+ method_ty.explicit_self);
+
+ match method_ty.explicit_self {
+ ty::ExplicitSelfCategory::ByValue => {
+ // Trait method is fn(self), no transformation needed.
+ assert!(!unsize);
+ self.write_autoderef_adjustment(self_expr.id, autoderefs);
+ }
- _ => {
- span_bug!(
- span,
- "trait method is &self but first arg is: {}",
- transformed_self_ty);
+ ty::ExplicitSelfCategory::ByReference(..) => {
+ // Trait method is fn(&self) or fn(&mut self), need an
+ // autoref. Pull the region etc out of the type of first argument.
+ match transformed_self_ty.sty {
+ ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ }) => {
+ self.write_adjustment(self_expr.id,
+ AdjustDerefRef(AutoDerefRef {
+ autoderefs: autoderefs,
+ autoref: Some(AutoPtr(region, mutbl)),
+ unsize: if unsize {
+ Some(transformed_self_ty)
+ } else {
+ None
+ }
+ }));
+ }
+
+ _ => {
+ span_bug!(
+ span,
+ "trait method is &self but first arg is: {}",
+ transformed_self_ty);
+ }
}
}
- }
- _ => {
- span_bug!(
- span,
- "unexpected explicit self type in operator method: {:?}",
- method_ty.explicit_self);
+ _ => {
+ span_bug!(
+ span,
+ "unexpected explicit self type in operator method: {:?}",
+ method_ty.explicit_self);
+ }
}
}
}
- }
- let callee = ty::MethodCallee {
- def_id: def_id,
- ty: fty,
- substs: trait_ref.substs
- };
+ let callee = ty::MethodCallee {
+ def_id: def_id,
+ ty: fty,
+ substs: trait_ref.substs
+ };
- debug!("callee = {:?}", callee);
+ debug!("callee = {:?}", callee);
- Some(callee)
-}
-
-pub fn resolve_ufcs<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- method_name: ast::Name,
- self_ty: ty::Ty<'tcx>,
- expr_id: ast::NodeId)
- -> Result<Def, MethodError<'tcx>>
-{
- let mode = probe::Mode::Path;
- let pick = probe::probe(fcx, span, mode, method_name, self_ty, expr_id)?;
-
- if let Some(import_id) = pick.import_id {
- fcx.tcx().used_trait_imports.borrow_mut().insert(import_id);
+ Some(callee)
}
- let def = pick.item.def();
- if let probe::InherentImplPick = pick.kind {
- if !pick.item.vis().is_accessible_from(fcx.body_id, &fcx.tcx().map) {
- let msg = format!("{} `{}` is private", def.kind_name(), &method_name.as_str());
- fcx.tcx().sess.span_err(span, &msg);
+ pub fn resolve_ufcs(&self,
+ span: Span,
+ method_name: ast::Name,
+ self_ty: ty::Ty<'tcx>,
+ expr_id: ast::NodeId)
+ -> Result<Def, MethodError<'tcx>>
+ {
+ let mode = probe::Mode::Path;
+ let pick = self.probe_method(span, mode, method_name, self_ty, expr_id)?;
+
+ if let Some(import_id) = pick.import_id {
+ self.tcx.used_trait_imports.borrow_mut().insert(import_id);
+ }
+
+ let def = pick.item.def();
+ if let probe::InherentImplPick = pick.kind {
+ if !pick.item.vis().is_accessible_from(self.body_id, &self.tcx.map) {
+ let msg = format!("{} `{}` is private", def.kind_name(), &method_name.as_str());
+ self.tcx.sess.span_err(span, &msg);
+ }
}
+ Ok(def)
}
- Ok(def)
-}
-/// Find item with name `item_name` defined in `trait_def_id`
-/// and return it, or `None`, if no such item.
-fn trait_item<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- item_name: ast::Name)
- -> Option<ty::ImplOrTraitItem<'tcx>>
-{
- let trait_items = tcx.trait_items(trait_def_id);
- trait_items.iter()
- .find(|item| item.name() == item_name)
- .cloned()
-}
+    /// Find the item named `item_name` defined in the trait `trait_def_id`
+    /// and return a clone of it, or `None` if the trait has no such item.
+    pub fn trait_item(&self,
+                      trait_def_id: DefId,
+                      item_name: ast::Name)
+                      -> Option<ty::ImplOrTraitItem<'tcx>>
+    {
+        let trait_items = self.tcx.trait_items(trait_def_id);
+        // Linear scan over the trait's items; `.cloned()` because the item
+        // list is shared and we return an owned copy.
+        trait_items.iter()
+                   .find(|item| item.name() == item_name)
+                   .cloned()
+    }
-fn impl_item<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_def_id: DefId,
- item_name: ast::Name)
- -> Option<ty::ImplOrTraitItem<'tcx>>
-{
- let impl_items = tcx.impl_items.borrow();
- let impl_items = impl_items.get(&impl_def_id).unwrap();
- impl_items
- .iter()
- .map(|&did| tcx.impl_or_trait_item(did.def_id()))
- .find(|m| m.name() == item_name)
+    /// Find the item named `item_name` among the items of the impl
+    /// `impl_def_id`, or `None` if the impl defines no such item.
+    ///
+    /// Note: panics (via `unwrap`) if `impl_def_id` has no entry in the
+    /// `impl_items` map at all.
+    pub fn impl_item(&self,
+                     impl_def_id: DefId,
+                     item_name: ast::Name)
+                     -> Option<ty::ImplOrTraitItem<'tcx>>
+    {
+        let impl_items = self.tcx.impl_items.borrow();
+        let impl_items = impl_items.get(&impl_def_id).unwrap();
+        // Resolve each recorded item id to the full item, then match by name.
+        impl_items
+            .iter()
+            .map(|&did| self.tcx.impl_or_trait_item(did.def_id()))
+            .find(|m| m.name() == item_name)
+    }
}
use super::{CandidateSource, ImplSource, TraitSource};
use super::suggest;
-use check;
use check::{FnCtxt, UnresolvedTypeAction};
use hir::def_id::DefId;
use hir::def::Def;
use rustc::ty::subst;
use rustc::ty::subst::Subst;
use rustc::traits;
-use rustc::ty::{self, NoPreference, Ty, TyCtxt, ToPolyTraitRef, TraitRef, TypeFoldable};
-use rustc::infer::{self, InferCtxt, InferOk, TypeOrigin};
+use rustc::ty::{self, NoPreference, Ty, ToPolyTraitRef, TraitRef, TypeFoldable};
+use rustc::infer::{InferOk, TypeOrigin};
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
use rustc::hir;
use std::collections::HashSet;
use std::mem;
+use std::ops::Deref;
use std::rc::Rc;
use self::CandidateKind::*;
pub use self::PickKind::*;
-struct ProbeContext<'a, 'tcx:'a> {
- fcx: &'a FnCtxt<'a, 'tcx>,
+struct ProbeContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
mode: Mode,
item_name: ast::Name,
unsatisfied_predicates: Vec<TraitRef<'tcx>>
}
+// Deref to the wrapped `FnCtxt` so probe code can write `self.tcx`,
+// `self.body_id`, etc. directly instead of going through `self.fcx`
+// (this is what lets the `self.fcx.tcx()` call sites above be shortened).
+impl<'a, 'gcx, 'tcx> Deref for ProbeContext<'a, 'gcx, 'tcx> {
+    type Target = FnCtxt<'a, 'gcx, 'tcx>;
+    fn deref(&self) -> &Self::Target {
+        &self.fcx
+    }
+}
+
#[derive(Debug)]
struct CandidateStep<'tcx> {
self_ty: Ty<'tcx>,
Path
}
-pub fn probe<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- mode: Mode,
- item_name: ast::Name,
- self_ty: Ty<'tcx>,
- scope_expr_id: ast::NodeId)
- -> PickResult<'tcx>
-{
- debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})",
- self_ty,
- item_name,
- scope_expr_id);
-
- // FIXME(#18741) -- right now, creating the steps involves evaluating the
- // `*` operator, which registers obligations that then escape into
- // the global fulfillment context and thus has global
- // side-effects. This is a bit of a pain to refactor. So just let
- // it ride, although it's really not great, and in fact could I
- // think cause spurious errors. Really though this part should
- // take place in the `fcx.infcx().probe` below.
- let steps = if mode == Mode::MethodCall {
- match create_steps(fcx, span, self_ty) {
- Some(steps) => steps,
- None =>return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(),
- Vec::new(), mode))),
- }
- } else {
- vec![CandidateStep {
- self_ty: self_ty,
- autoderefs: 0,
- unsize: false
- }]
- };
-
- // Create a list of simplified self types, if we can.
- let mut simplified_steps = Vec::new();
- for step in &steps {
- match ty::fast_reject::simplify_type(fcx.tcx(), step.self_ty, true) {
- None => { break; }
- Some(simplified_type) => { simplified_steps.push(simplified_type); }
- }
- }
- let opt_simplified_steps =
- if simplified_steps.len() < steps.len() {
- None // failed to convert at least one of the steps
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn probe_method(&self,
+ span: Span,
+ mode: Mode,
+ item_name: ast::Name,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: ast::NodeId)
+ -> PickResult<'tcx>
+ {
+ debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})",
+ self_ty,
+ item_name,
+ scope_expr_id);
+
+ // FIXME(#18741) -- right now, creating the steps involves evaluating the
+ // `*` operator, which registers obligations that then escape into
+ // the global fulfillment context and thus has global
+ // side-effects. This is a bit of a pain to refactor. So just let
+ // it ride, although it's really not great, and in fact could I
+ // think cause spurious errors. Really though this part should
+ // take place in the `self.probe` below.
+ let steps = if mode == Mode::MethodCall {
+ match self.create_steps(span, self_ty) {
+ Some(steps) => steps,
+ None =>return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(),
+ Vec::new(), mode))),
+ }
} else {
- Some(simplified_steps)
+ vec![CandidateStep {
+ self_ty: self_ty,
+ autoderefs: 0,
+ unsize: false
+ }]
};
- debug!("ProbeContext: steps for self_ty={:?} are {:?}",
- self_ty,
- steps);
-
- // this creates one big transaction so that all type variables etc
- // that we create during the probe process are removed later
- fcx.infcx().probe(|_| {
- let mut probe_cx = ProbeContext::new(fcx,
- span,
- mode,
- item_name,
- steps,
- opt_simplified_steps);
- probe_cx.assemble_inherent_candidates();
- probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?;
- probe_cx.pick()
- })
-}
+ // Create a list of simplified self types, if we can.
+ let mut simplified_steps = Vec::new();
+ for step in &steps {
+ match ty::fast_reject::simplify_type(self.tcx, step.self_ty, true) {
+ None => { break; }
+ Some(simplified_type) => { simplified_steps.push(simplified_type); }
+ }
+ }
+ let opt_simplified_steps =
+ if simplified_steps.len() < steps.len() {
+ None // failed to convert at least one of the steps
+ } else {
+ Some(simplified_steps)
+ };
-fn create_steps<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- self_ty: Ty<'tcx>)
- -> Option<Vec<CandidateStep<'tcx>>> {
- let mut steps = Vec::new();
-
- let (final_ty, dereferences, _) = check::autoderef(fcx,
- span,
- self_ty,
- || None,
- UnresolvedTypeAction::Error,
- NoPreference,
- |t, d| {
- steps.push(CandidateStep {
- self_ty: t,
- autoderefs: d,
- unsize: false
- });
- None::<()> // keep iterating until we can't anymore
- });
+ debug!("ProbeContext: steps for self_ty={:?} are {:?}",
+ self_ty,
+ steps);
+
+ // this creates one big transaction so that all type variables etc
+ // that we create during the probe process are removed later
+ self.probe(|_| {
+ let mut probe_cx = ProbeContext::new(self,
+ span,
+ mode,
+ item_name,
+ steps,
+ opt_simplified_steps);
+ probe_cx.assemble_inherent_candidates();
+ probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?;
+ probe_cx.pick()
+ })
+ }
- match final_ty.sty {
- ty::TyArray(elem_ty, _) => {
+ fn create_steps(&self,
+ span: Span,
+ self_ty: Ty<'tcx>)
+ -> Option<Vec<CandidateStep<'tcx>>> {
+ let mut steps = Vec::new();
+
+ let (final_ty, dereferences, _) = self.autoderef(span,
+ self_ty,
+ || None,
+ UnresolvedTypeAction::Error,
+ NoPreference,
+ |t, d| {
steps.push(CandidateStep {
- self_ty: fcx.tcx().mk_slice(elem_ty),
- autoderefs: dereferences,
- unsize: true
+ self_ty: t,
+ autoderefs: d,
+ unsize: false
});
+ None::<()> // keep iterating until we can't anymore
+ });
+
+ match final_ty.sty {
+ ty::TyArray(elem_ty, _) => {
+ steps.push(CandidateStep {
+ self_ty: self.tcx.mk_slice(elem_ty),
+ autoderefs: dereferences,
+ unsize: true
+ });
+ }
+ ty::TyError => return None,
+ _ => (),
}
- ty::TyError => return None,
- _ => (),
- }
- Some(steps)
+ Some(steps)
+ }
}
-impl<'a,'tcx> ProbeContext<'a,'tcx> {
- fn new(fcx: &'a FnCtxt<'a,'tcx>,
+impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> {
+ fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
mode: Mode,
item_name: ast::Name,
steps: Vec<CandidateStep<'tcx>>,
opt_simplified_steps: Option<Vec<ty::fast_reject::SimplifiedType>>)
- -> ProbeContext<'a,'tcx>
+ -> ProbeContext<'a, 'gcx, 'tcx>
{
ProbeContext {
fcx: fcx,
self.private_candidate = None;
}
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
- self.fcx.tcx()
- }
-
- fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> {
- self.fcx.infcx()
- }
-
///////////////////////////////////////////////////////////////////////////
// CANDIDATE ASSEMBLY
self.assemble_inherent_impl_candidates_for_type(def.did);
}
ty::TyBox(_) => {
- if let Some(box_did) = self.tcx().lang_items.owned_box() {
+ if let Some(box_did) = self.tcx.lang_items.owned_box() {
self.assemble_inherent_impl_candidates_for_type(box_did);
}
}
self.assemble_inherent_candidates_from_param(self_ty, p);
}
ty::TyChar => {
- let lang_def_id = self.tcx().lang_items.char_impl();
+ let lang_def_id = self.tcx.lang_items.char_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyStr => {
- let lang_def_id = self.tcx().lang_items.str_impl();
+ let lang_def_id = self.tcx.lang_items.str_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TySlice(_) => {
- let lang_def_id = self.tcx().lang_items.slice_impl();
+ let lang_def_id = self.tcx.lang_items.slice_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
- let lang_def_id = self.tcx().lang_items.const_ptr_impl();
+ let lang_def_id = self.tcx.lang_items.const_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
- let lang_def_id = self.tcx().lang_items.mut_ptr_impl();
+ let lang_def_id = self.tcx.lang_items.mut_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyInt(ast::IntTy::I8) => {
- let lang_def_id = self.tcx().lang_items.i8_impl();
+ let lang_def_id = self.tcx.lang_items.i8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyInt(ast::IntTy::I16) => {
- let lang_def_id = self.tcx().lang_items.i16_impl();
+ let lang_def_id = self.tcx.lang_items.i16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyInt(ast::IntTy::I32) => {
- let lang_def_id = self.tcx().lang_items.i32_impl();
+ let lang_def_id = self.tcx.lang_items.i32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyInt(ast::IntTy::I64) => {
- let lang_def_id = self.tcx().lang_items.i64_impl();
+ let lang_def_id = self.tcx.lang_items.i64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyInt(ast::IntTy::Is) => {
- let lang_def_id = self.tcx().lang_items.isize_impl();
+ let lang_def_id = self.tcx.lang_items.isize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyUint(ast::UintTy::U8) => {
- let lang_def_id = self.tcx().lang_items.u8_impl();
+ let lang_def_id = self.tcx.lang_items.u8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyUint(ast::UintTy::U16) => {
- let lang_def_id = self.tcx().lang_items.u16_impl();
+ let lang_def_id = self.tcx.lang_items.u16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyUint(ast::UintTy::U32) => {
- let lang_def_id = self.tcx().lang_items.u32_impl();
+ let lang_def_id = self.tcx.lang_items.u32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyUint(ast::UintTy::U64) => {
- let lang_def_id = self.tcx().lang_items.u64_impl();
+ let lang_def_id = self.tcx.lang_items.u64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyUint(ast::UintTy::Us) => {
- let lang_def_id = self.tcx().lang_items.usize_impl();
+ let lang_def_id = self.tcx.lang_items.usize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyFloat(ast::FloatTy::F32) => {
- let lang_def_id = self.tcx().lang_items.f32_impl();
+ let lang_def_id = self.tcx.lang_items.f32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
ty::TyFloat(ast::FloatTy::F64) => {
- let lang_def_id = self.tcx().lang_items.f64_impl();
+ let lang_def_id = self.tcx.lang_items.f64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
_ => {
fn assemble_inherent_impl_for_primitive(&mut self, lang_def_id: Option<DefId>) {
if let Some(impl_def_id) = lang_def_id {
- self.tcx().populate_implementations_for_primitive_if_necessary(impl_def_id);
+ self.tcx.populate_implementations_for_primitive_if_necessary(impl_def_id);
self.assemble_inherent_impl_probe(impl_def_id);
}
fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) {
// Read the inherent implementation candidates for this type from the
// metadata if necessary.
- self.tcx().populate_inherent_implementations_for_type_if_necessary(def_id);
+ self.tcx.populate_inherent_implementations_for_type_if_necessary(def_id);
- if let Some(impl_infos) = self.tcx().inherent_impls.borrow().get(&def_id) {
+ if let Some(impl_infos) = self.tcx.inherent_impls.borrow().get(&def_id) {
for &impl_def_id in impl_infos.iter() {
self.assemble_inherent_impl_probe(impl_def_id);
}
debug!("assemble_inherent_impl_probe {:?}", impl_def_id);
- let item = match impl_item(self.tcx(), impl_def_id, self.item_name) {
+ let item = match self.impl_item(impl_def_id) {
Some(m) => m,
None => { return; } // No method with correct name on this impl
};
return self.record_static_candidate(ImplSource(impl_def_id));
}
- if !item.vis().is_accessible_from(self.fcx.body_id, &self.tcx().map) {
+ if !item.vis().is_accessible_from(self.body_id, &self.tcx.map) {
self.private_candidate = Some(item.def());
return
}
let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
- let impl_ty = impl_ty.subst(self.tcx(), &impl_substs);
+ let impl_ty = impl_ty.subst(self.tcx, &impl_substs);
// Determine the receiver type that the method itself expects.
let xform_self_ty = self.xform_self_ty(&item, impl_ty, &impl_substs);
// We can't use normalize_associated_types_in as it will pollute the
// fcx's fulfillment context after this probe is over.
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
- let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx());
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let mut selcx = &mut traits::SelectionContext::new(self.fcx);
let traits::Normalized { value: xform_self_ty, obligations } =
traits::normalize(selcx, cause, &xform_self_ty);
debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}",
// a substitution that replaces `Self` with the object type
// itself. Hence, a `&self` method will wind up with an
// argument type like `&Trait`.
- let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx(), self_ty);
+ let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx, self_ty);
self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| {
let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref);
// FIXME -- Do we want to commit to this behavior for param bounds?
let bounds: Vec<_> =
- self.fcx.inh.infcx.parameter_environment.caller_bounds
+ self.parameter_environment.caller_bounds
.iter()
.filter_map(|predicate| {
match *predicate {
mut mk_cand: F,
) where
F: for<'b> FnMut(
- &mut ProbeContext<'b, 'tcx>,
+ &mut ProbeContext<'b, 'gcx, 'tcx>,
ty::PolyTraitRef<'tcx>,
ty::ImplOrTraitItem<'tcx>,
),
{
debug!("elaborate_bounds(bounds={:?})", bounds);
- let tcx = self.tcx();
+ let tcx = self.tcx;
for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
- let item = match trait_item(tcx,
- bound_trait_ref.def_id(),
- self.item_name) {
+ let item = match self.trait_item(bound_trait_ref.def_id()) {
Some(v) => v,
None => { continue; }
};
-> Result<(), MethodError<'tcx>>
{
let mut duplicates = HashSet::new();
- let opt_applicable_traits = self.fcx.ccx.trait_map.get(&expr_id);
+ let opt_applicable_traits = self.ccx.trait_map.get(&expr_id);
if let Some(applicable_traits) = opt_applicable_traits {
for trait_candidate in applicable_traits {
let trait_did = trait_candidate.def_id;
fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> {
let mut duplicates = HashSet::new();
- for trait_info in suggest::all_traits(self.fcx.ccx) {
+ for trait_info in suggest::all_traits(self.ccx) {
if duplicates.insert(trait_info.def_id) {
self.assemble_extension_candidates_for_trait(trait_info.def_id)?;
}
// Check whether `trait_def_id` defines a method with suitable name:
let trait_items =
- self.tcx().trait_items(trait_def_id);
+ self.tcx.trait_items(trait_def_id);
let maybe_item =
trait_items.iter()
.find(|item| item.name() == self.item_name);
trait_def_id: DefId,
item: ty::ImplOrTraitItem<'tcx>)
{
- let trait_def = self.tcx().lookup_trait_def(trait_def_id);
+ let trait_def = self.tcx.lookup_trait_def(trait_def_id);
// FIXME(arielb1): can we use for_each_relevant_impl here?
- trait_def.for_each_impl(self.tcx(), |impl_def_id| {
+ trait_def.for_each_impl(self.tcx, |impl_def_id| {
debug!("assemble_extension_candidates_for_trait_impl: trait_def_id={:?} \
impl_def_id={:?}",
trait_def_id,
debug!("impl_substs={:?}", impl_substs);
let impl_trait_ref =
- self.tcx().impl_trait_ref(impl_def_id)
+ self.tcx.impl_trait_ref(impl_def_id)
.unwrap() // we know this is a trait impl
- .subst(self.tcx(), &impl_substs);
+ .subst(self.tcx, &impl_substs);
debug!("impl_trait_ref={:?}", impl_trait_ref);
// Normalize the receiver. We can't use normalize_associated_types_in
// as it will pollute the fcx's fulfillment context after this probe
// is over.
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
- let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx());
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let mut selcx = &mut traits::SelectionContext::new(self.fcx);
let traits::Normalized { value: xform_self_ty, obligations } =
traits::normalize(selcx, cause, &xform_self_ty);
None => { return true; }
};
- let impl_type = self.tcx().lookup_item_type(impl_def_id);
+ let impl_type = self.tcx.lookup_item_type(impl_def_id);
let impl_simplified_type =
- match ty::fast_reject::simplify_type(self.tcx(), impl_type.ty, false) {
+ match ty::fast_reject::simplify_type(self.tcx, impl_type.ty, false) {
Some(simplified_type) => simplified_type,
None => { return true; }
};
-> Result<(), MethodError<'tcx>>
{
// Check if this is one of the Fn,FnMut,FnOnce traits.
- let tcx = self.tcx();
+ let tcx = self.tcx;
let kind = if Some(trait_def_id) == tcx.lang_items.fn_trait() {
ty::ClosureKind::Fn
} else if Some(trait_def_id) == tcx.lang_items.fn_mut_trait() {
_ => continue,
};
- let closure_kinds = &self.fcx.inh.tables.borrow().closure_kinds;
+ let closure_kinds = &self.tables.borrow().closure_kinds;
let closure_kind = match closure_kinds.get(&closure_def_id) {
Some(&k) => k,
None => {
// for the purposes of our method lookup, we only take
// receiver type into account, so we can just substitute
// fresh types here to use during substitution and subtyping.
- let trait_def = self.tcx().lookup_trait_def(trait_def_id);
- let substs = self.infcx().fresh_substs_for_trait(self.span,
- &trait_def.generics,
- step.self_ty);
+ let trait_def = self.tcx.lookup_trait_def(trait_def_id);
+ let substs = self.fresh_substs_for_trait(self.span,
+ &trait_def.generics,
+ step.self_ty);
let xform_self_ty = self.xform_self_ty(&item,
step.self_ty,
debug!("assemble_projection_candidates: projection_trait_ref={:?}",
projection_trait_ref);
- let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id);
- let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs);
+ let trait_predicates = self.tcx.lookup_predicates(projection_trait_ref.def_id);
+ let bounds = trait_predicates.instantiate(self.tcx, projection_trait_ref.substs);
let predicates = bounds.predicates.into_vec();
debug!("assemble_projection_candidates: predicates={:?}",
predicates);
for poly_bound in
- traits::elaborate_predicates(self.tcx(), predicates)
+ traits::elaborate_predicates(self.tcx, predicates)
.filter_map(|p| p.to_opt_poly_trait_ref())
.filter(|b| b.def_id() == trait_def_id)
{
projection_trait_ref,
bound);
- if self.infcx().can_equate(&step.self_ty, &bound.self_ty()).is_ok() {
+ if self.can_equate(&step.self_ty, &bound.self_ty()).is_ok() {
let xform_self_ty = self.xform_self_ty(&item,
bound.self_ty(),
bound.substs);
debug!("assemble_where_clause_candidates(trait_def_id={:?})",
trait_def_id);
- let caller_predicates = self.fcx.inh.infcx.parameter_environment.caller_bounds.clone();
- for poly_bound in traits::elaborate_predicates(self.tcx(), caller_predicates)
+ let caller_predicates = self.parameter_environment.caller_bounds.clone();
+ for poly_bound in traits::elaborate_predicates(self.tcx, caller_predicates)
.filter_map(|p| p.to_opt_poly_trait_ref())
.filter(|b| b.def_id() == trait_def_id)
{
self.reset();
let span = self.span;
- let tcx = self.tcx();
+ let tcx = self.tcx;
self.assemble_extension_candidates_for_all_traits()?;
step: &CandidateStep<'tcx>)
-> Option<PickResult<'tcx>>
{
- let tcx = self.tcx();
+ let tcx = self.tcx;
// In general, during probing we erase regions. See
// `impl_self_ty()` for an explanation.
}
fn pick_method(&mut self, self_ty: Ty<'tcx>) -> Option<PickResult<'tcx>> {
- debug!("pick_method(self_ty={})", self.infcx().ty_to_string(self_ty));
+ debug!("pick_method(self_ty={})", self.ty_to_string(self_ty));
let mut possibly_unsatisfied_predicates = Vec::new();
self_ty,
probe);
- self.infcx().probe(|_| {
+ self.probe(|_| {
// First check that the self type can be related.
- match self.make_sub_ty(self_ty, probe.xform_self_ty) {
- Ok(()) => { }
+ match self.sub_types(false, TypeOrigin::Misc(DUMMY_SP),
+ self_ty, probe.xform_self_ty) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty())
+ }
Err(_) => {
debug!("--> cannot relate self-types");
return false;
}
};
- let selcx = &mut traits::SelectionContext::new(self.infcx());
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
+ let selcx = &mut traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
// Check whether the impl imposes obligations we have to worry about.
- let impl_bounds = self.tcx().lookup_predicates(impl_def_id);
- let impl_bounds = impl_bounds.instantiate(self.tcx(), substs);
+ let impl_bounds = self.tcx.lookup_predicates(impl_def_id);
+ let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
let traits::Normalized { value: impl_bounds,
obligations: norm_obligations } =
traits::normalize(selcx, cause.clone(), &impl_bounds);
///////////////////////////////////////////////////////////////////////////
// MISCELLANY
-
- fn make_sub_ty(&self, sub: Ty<'tcx>, sup: Ty<'tcx>) -> infer::UnitResult<'tcx> {
- self.infcx().sub_types(false, TypeOrigin::Misc(DUMMY_SP), sub, sup)
- // FIXME(#32730) propagate obligations
- .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
- }
-
fn has_applicable_self(&self, item: &ty::ImplOrTraitItem) -> bool {
// "fast track" -- check for usage of sugar
match *item {
placeholder = (*substs).clone().with_method(Vec::new(), method_regions);
- self.infcx().type_vars_for_defs(
+ self.type_vars_for_defs(
self.span,
subst::FnSpace,
&mut placeholder,
// in the values from the substitution.
let xform_self_ty = method.fty.sig.input(0);
let xform_self_ty = self.erase_late_bound_regions(&xform_self_ty);
- let xform_self_ty = xform_self_ty.subst(self.tcx(), substs);
+ let xform_self_ty = xform_self_ty.subst(self.tcx, substs);
xform_self_ty
}
impl_def_id: DefId)
-> (Ty<'tcx>, subst::Substs<'tcx>)
{
- let impl_pty = self.tcx().lookup_item_type(impl_def_id);
+ let impl_pty = self.tcx.lookup_item_type(impl_def_id);
let type_vars =
impl_pty.generics.types.map(
- |_| self.infcx().next_ty_var());
+ |_| self.next_ty_var());
let region_placeholders =
impl_pty.generics.regions.map(
fn erase_late_bound_regions<T>(&self, value: &ty::Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- self.tcx().erase_late_bound_regions(value)
+ self.tcx.erase_late_bound_regions(value)
}
-}
-fn impl_item<'tcx>(tcx: &TyCtxt<'tcx>,
- impl_def_id: DefId,
- item_name: ast::Name)
- -> Option<ty::ImplOrTraitItem<'tcx>>
-{
- let impl_items = tcx.impl_items.borrow();
- let impl_items = impl_items.get(&impl_def_id).unwrap();
- impl_items
- .iter()
- .map(|&did| tcx.impl_or_trait_item(did.def_id()))
- .find(|item| item.name() == item_name)
-}
+ fn impl_item(&self, impl_def_id: DefId)
+ -> Option<ty::ImplOrTraitItem<'tcx>>
+ {
+ self.fcx.impl_item(impl_def_id, self.item_name)
+ }
-/// Find item with name `item_name` defined in `trait_def_id`
-/// and return it, or `None`, if no such item.
-fn trait_item<'tcx>(tcx: &TyCtxt<'tcx>,
- trait_def_id: DefId,
- item_name: ast::Name)
- -> Option<ty::ImplOrTraitItem<'tcx>>
-{
- let trait_items = tcx.trait_items(trait_def_id);
- debug!("trait_method; items: {:?}", trait_items);
- trait_items.iter()
- .find(|item| item.name() == item_name)
- .cloned()
+ /// Find item with name `item_name` defined in `trait_def_id`
+ /// and return it, or `None`, if no such item.
+ fn trait_item(&self, trait_def_id: DefId)
+ -> Option<ty::ImplOrTraitItem<'tcx>>
+ {
+ self.fcx.trait_item(trait_def_id, self.item_name)
+ }
}
impl<'tcx> Candidate<'tcx> {
use CrateCtxt;
-use check::{self, FnCtxt, UnresolvedTypeAction, autoderef};
+use check::{self, FnCtxt, UnresolvedTypeAction};
use rustc::hir::map as hir_map;
use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable};
use middle::cstore;
use std::cell;
use std::cmp::Ordering;
-use super::{MethodError, NoMatchData, CandidateSource, impl_item, trait_item};
+use super::{MethodError, NoMatchData, CandidateSource};
use super::probe::Mode;
-fn is_fn_ty<'a, 'tcx>(ty: &Ty<'tcx>, fcx: &FnCtxt<'a, 'tcx>, span: Span) -> bool {
- let cx = fcx.tcx();
- match ty.sty {
- // Not all of these (e.g. unsafe fns) implement FnOnce
- // so we look for these beforehand
- ty::TyClosure(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => true,
- // If it's not a simple function, look for things which implement FnOnce
- _ => {
- if let Ok(fn_once_trait_did) =
- cx.lang_items.require(FnOnceTraitLangItem) {
- let infcx = fcx.infcx();
- let (_, _, opt_is_fn) = autoderef(fcx,
- span,
- ty,
- || None,
- UnresolvedTypeAction::Ignore,
- LvaluePreference::NoPreference,
- |ty, _| {
- infcx.probe(|_| {
- let fn_once_substs =
- Substs::new_trait(vec![infcx.next_ty_var()],
- Vec::new(),
- ty);
- let trait_ref =
- ty::TraitRef::new(fn_once_trait_did,
- cx.mk_substs(fn_once_substs));
- let poly_trait_ref = trait_ref.to_poly_trait_ref();
- let obligation = Obligation::misc(span,
- fcx.body_id,
- poly_trait_ref
- .to_predicate());
- let mut selcx = SelectionContext::new(infcx);
-
- if selcx.evaluate_obligation(&obligation) {
- Some(())
- } else {
- None
- }
- })
- });
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ fn is_fn_ty(&self, ty: &Ty<'tcx>, span: Span) -> bool {
+ let tcx = self.tcx;
+ match ty.sty {
+ // Not all of these (e.g. unsafe fns) implement FnOnce
+ // so we look for these beforehand
+ ty::TyClosure(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => true,
+ // If it's not a simple function, look for things which implement FnOnce
+ _ => {
+ if let Ok(fn_once_trait_did) =
+ tcx.lang_items.require(FnOnceTraitLangItem) {
+ let (_, _, opt_is_fn) = self.autoderef(span,
+ ty,
+ || None,
+ UnresolvedTypeAction::Ignore,
+ LvaluePreference::NoPreference,
+ |ty, _| {
+ self.probe(|_| {
+ let fn_once_substs =
+ Substs::new_trait(vec![self.next_ty_var()], vec![], ty);
+ let trait_ref =
+ ty::TraitRef::new(fn_once_trait_did,
+ tcx.mk_substs(fn_once_substs));
+ let poly_trait_ref = trait_ref.to_poly_trait_ref();
+ let obligation = Obligation::misc(span,
+ self.body_id,
+ poly_trait_ref
+ .to_predicate());
+ let mut selcx = SelectionContext::new(self);
+
+ if selcx.evaluate_obligation(&obligation) {
+ Some(())
+ } else {
+ None
+ }
+ })
+ });
- opt_is_fn.is_some()
- } else {
- false
+ opt_is_fn.is_some()
+ } else {
+ false
+ }
}
}
}
-}
-
-pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- rcvr_ty: Ty<'tcx>,
- item_name: ast::Name,
- rcvr_expr: Option<&hir::Expr>,
- error: MethodError<'tcx>)
-{
- // avoid suggestions when we don't know what's going on.
- if rcvr_ty.references_error() {
- return
- }
+ pub fn report_method_error(&self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: ast::Name,
+ rcvr_expr: Option<&hir::Expr>,
+ error: MethodError<'tcx>)
+ {
+ // avoid suggestions when we don't know what's going on.
+ if rcvr_ty.references_error() {
+ return
+ }
- match error {
- MethodError::NoMatch(NoMatchData { static_candidates: static_sources,
- unsatisfied_predicates,
- out_of_scope_traits,
- mode, .. }) => {
- let cx = fcx.tcx();
-
- let mut err = fcx.type_error_struct(
- span,
- |actual| {
- format!("no {} named `{}` found for type `{}` \
- in the current scope",
- if mode == Mode::MethodCall { "method" }
- else { "associated item" },
- item_name,
- actual)
- },
- rcvr_ty,
- None);
-
- // If the item has the name of a field, give a help note
- if let (&ty::TyStruct(def, substs), Some(expr)) = (&rcvr_ty.sty, rcvr_expr) {
- if let Some(field) = def.struct_variant().find_field_named(item_name) {
- let expr_string = match cx.sess.codemap().span_to_snippet(expr.span) {
- Ok(expr_string) => expr_string,
- _ => "s".into() // Default to a generic placeholder for the
- // expression when we can't generate a string
- // snippet
- };
-
- let field_ty = field.ty(cx, substs);
-
- if is_fn_ty(&field_ty, &fcx, span) {
- err.span_note(span,
- &format!("use `({0}.{1})(...)` if you meant to call \
- the function stored in the `{1}` field",
- expr_string, item_name));
- } else {
- err.span_note(span, &format!("did you mean to write `{0}.{1}`?",
- expr_string, item_name));
+ let report_candidates = |err: &mut DiagnosticBuilder,
+ mut sources: Vec<CandidateSource>| {
+
+ sources.sort();
+ sources.dedup();
+
+ for (idx, source) in sources.iter().enumerate() {
+ match *source {
+ CandidateSource::ImplSource(impl_did) => {
+ // Provide the best span we can. Use the item, if local to crate, else
+ // the impl, if local to crate (item may be defaulted), else nothing.
+ let item = self.impl_item(impl_did, item_name)
+ .or_else(|| {
+ self.trait_item(
+                                self.tcx.impl_trait_ref(impl_did).unwrap().def_id,
+                                item_name
+ )
+ }).unwrap();
+ let note_span = self.tcx.map.span_if_local(item.def_id()).or_else(|| {
+ self.tcx.map.span_if_local(impl_did)
+ });
+
+ let impl_ty = self.impl_self_ty(span, impl_did).ty;
+
+ let insertion = match self.tcx.impl_trait_ref(impl_did) {
+ None => format!(""),
+ Some(trait_ref) => {
+ format!(" of the trait `{}`",
+ self.tcx.item_path_str(trait_ref.def_id))
+ }
+ };
+
+ let note_str = format!("candidate #{} is defined in an impl{} \
+ for the type `{}`",
+ idx + 1,
+ insertion,
+ impl_ty);
+ if let Some(note_span) = note_span {
+ // We have a span pointing to the method. Show note with snippet.
+ err.span_note(note_span, ¬e_str);
+ } else {
+ err.note(¬e_str);
+ }
+ }
+ CandidateSource::TraitSource(trait_did) => {
+ let item = self.trait_item(trait_did, item_name).unwrap();
+ let item_span = self.tcx.map.def_id_span(item.def_id(), span);
+ span_note!(err, item_span,
+ "candidate #{} is defined in the trait `{}`",
+ idx + 1,
+ self.tcx.item_path_str(trait_did));
}
}
}
-
- if is_fn_ty(&rcvr_ty, &fcx, span) {
- macro_rules! report_function {
- ($span:expr, $name:expr) => {
- err.note(&format!("{} is a function, perhaps you wish to call it",
- $name));
+ };
+
+ match error {
+ MethodError::NoMatch(NoMatchData { static_candidates: static_sources,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ mode, .. }) => {
+ let tcx = self.tcx;
+
+ let mut err = self.type_error_struct(
+ span,
+ |actual| {
+ format!("no {} named `{}` found for type `{}` \
+ in the current scope",
+ if mode == Mode::MethodCall { "method" }
+ else { "associated item" },
+ item_name,
+ actual)
+ },
+ rcvr_ty,
+ None);
+
+ // If the item has the name of a field, give a help note
+ if let (&ty::TyStruct(def, substs), Some(expr)) = (&rcvr_ty.sty, rcvr_expr) {
+ if let Some(field) = def.struct_variant().find_field_named(item_name) {
+ let expr_string = match tcx.sess.codemap().span_to_snippet(expr.span) {
+ Ok(expr_string) => expr_string,
+ _ => "s".into() // Default to a generic placeholder for the
+ // expression when we can't generate a string
+ // snippet
+ };
+
+ let field_ty = field.ty(tcx, substs);
+
+ if self.is_fn_ty(&field_ty, span) {
+ err.span_note(span,
+ &format!("use `({0}.{1})(...)` if you meant to call \
+ the function stored in the `{1}` field",
+ expr_string, item_name));
+ } else {
+ err.span_note(span, &format!("did you mean to write `{0}.{1}`?",
+ expr_string, item_name));
+ }
}
}
- if let Some(expr) = rcvr_expr {
- if let Ok (expr_string) = cx.sess.codemap().span_to_snippet(expr.span) {
- report_function!(expr.span, expr_string);
+ if self.is_fn_ty(&rcvr_ty, span) {
+ macro_rules! report_function {
+ ($span:expr, $name:expr) => {
+ err.note(&format!("{} is a function, perhaps you wish to call it",
+ $name));
+ }
}
- else if let Expr_::ExprPath(_, path) = expr.node.clone() {
- if let Some(segment) = path.segments.last() {
- report_function!(expr.span, segment.identifier.name);
+
+ if let Some(expr) = rcvr_expr {
+ if let Ok (expr_string) = tcx.sess.codemap().span_to_snippet(expr.span) {
+ report_function!(expr.span, expr_string);
+ }
+ else if let Expr_::ExprPath(_, path) = expr.node.clone() {
+ if let Some(segment) = path.segments.last() {
+ report_function!(expr.span, segment.identifier.name);
+ }
}
}
}
- }
- if !static_sources.is_empty() {
- err.note(
- "found the following associated functions; to be used as \
- methods, functions must have a `self` parameter");
+ if !static_sources.is_empty() {
+ err.note(
+ "found the following associated functions; to be used as \
+ methods, functions must have a `self` parameter");
- report_candidates(fcx, &mut err, span, item_name, static_sources);
- }
+ report_candidates(&mut err, static_sources);
+ }
- if !unsatisfied_predicates.is_empty() {
- let bound_list = unsatisfied_predicates.iter()
- .map(|p| format!("`{} : {}`",
- p.self_ty(),
- p))
- .collect::<Vec<_>>()
- .join(", ");
- err.note(
- &format!("the method `{}` exists but the \
- following trait bounds were not satisfied: {}",
- item_name,
- bound_list));
- }
+ if !unsatisfied_predicates.is_empty() {
+ let bound_list = unsatisfied_predicates.iter()
+ .map(|p| format!("`{} : {}`",
+ p.self_ty(),
+ p))
+ .collect::<Vec<_>>()
+ .join(", ");
+ err.note(
+ &format!("the method `{}` exists but the \
+ following trait bounds were not satisfied: {}",
+ item_name,
+ bound_list));
+ }
- suggest_traits_to_import(fcx, &mut err, span, rcvr_ty, item_name,
- rcvr_expr, out_of_scope_traits);
- err.emit();
- }
+ self.suggest_traits_to_import(&mut err, span, rcvr_ty, item_name,
+ rcvr_expr, out_of_scope_traits);
+ err.emit();
+ }
- MethodError::Ambiguity(sources) => {
- let mut err = struct_span_err!(fcx.sess(), span, E0034,
- "multiple applicable items in scope");
+ MethodError::Ambiguity(sources) => {
+ let mut err = struct_span_err!(self.sess(), span, E0034,
+ "multiple applicable items in scope");
- report_candidates(fcx, &mut err, span, item_name, sources);
- err.emit();
- }
+ report_candidates(&mut err, sources);
+ err.emit();
+ }
- MethodError::ClosureAmbiguity(trait_def_id) => {
- let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \
- invoked on this closure as we have not yet inferred what \
- kind of closure it is",
- item_name,
- fcx.tcx().item_path_str(trait_def_id));
- let msg = if let Some(callee) = rcvr_expr {
- format!("{}; use overloaded call notation instead (e.g., `{}()`)",
- msg, pprust::expr_to_string(callee))
- } else {
- msg
- };
- fcx.sess().span_err(span, &msg);
- }
+ MethodError::ClosureAmbiguity(trait_def_id) => {
+ let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \
+ invoked on this closure as we have not yet inferred what \
+ kind of closure it is",
+ item_name,
+ self.tcx.item_path_str(trait_def_id));
+ let msg = if let Some(callee) = rcvr_expr {
+ format!("{}; use overloaded call notation instead (e.g., `{}()`)",
+ msg, pprust::expr_to_string(callee))
+ } else {
+ msg
+ };
+ self.sess().span_err(span, &msg);
+ }
- MethodError::PrivateMatch(def) => {
- let msg = format!("{} `{}` is private", def.kind_name(), item_name);
- fcx.tcx().sess.span_err(span, &msg);
+ MethodError::PrivateMatch(def) => {
+ let msg = format!("{} `{}` is private", def.kind_name(), item_name);
+ self.tcx.sess.span_err(span, &msg);
+ }
}
}
- fn report_candidates(fcx: &FnCtxt,
- err: &mut DiagnosticBuilder,
- span: Span,
- item_name: ast::Name,
- mut sources: Vec<CandidateSource>) {
- sources.sort();
- sources.dedup();
-
- for (idx, source) in sources.iter().enumerate() {
- match *source {
- CandidateSource::ImplSource(impl_did) => {
- // Provide the best span we can. Use the item, if local to crate, else
- // the impl, if local to crate (item may be defaulted), else nothing.
- let item = impl_item(fcx.tcx(), impl_did, item_name)
- .or_else(|| {
- trait_item(
- fcx.tcx(),
- fcx.tcx().impl_trait_ref(impl_did).unwrap().def_id,
- item_name
- )
- }).unwrap();
- let note_span = fcx.tcx().map.span_if_local(item.def_id()).or_else(|| {
- fcx.tcx().map.span_if_local(impl_did)
- });
-
- let impl_ty = check::impl_self_ty(fcx, span, impl_did).ty;
-
- let insertion = match fcx.tcx().impl_trait_ref(impl_did) {
- None => format!(""),
- Some(trait_ref) => {
- format!(" of the trait `{}`",
- fcx.tcx().item_path_str(trait_ref.def_id))
- }
- };
-
- let note_str = format!("candidate #{} is defined in an impl{} \
- for the type `{}`",
- idx + 1,
- insertion,
- impl_ty);
- if let Some(note_span) = note_span {
- // We have a span pointing to the method. Show note with snippet.
- err.span_note(note_span, ¬e_str);
- } else {
- err.note(¬e_str);
- }
- }
- CandidateSource::TraitSource(trait_did) => {
- let item = trait_item(fcx.tcx(), trait_did, item_name).unwrap();
- let item_span = fcx.tcx().map.def_id_span(item.def_id(), span);
- span_note!(err, item_span,
- "candidate #{} is defined in the trait `{}`",
- idx + 1,
- fcx.tcx().item_path_str(trait_did));
- }
+ fn suggest_traits_to_import(&self,
+ err: &mut DiagnosticBuilder,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: ast::Name,
+ rcvr_expr: Option<&hir::Expr>,
+ valid_out_of_scope_traits: Vec<DefId>)
+ {
+ if !valid_out_of_scope_traits.is_empty() {
+ let mut candidates = valid_out_of_scope_traits;
+ candidates.sort();
+ candidates.dedup();
+ let msg = format!(
+ "items from traits can only be used if the trait is in scope; \
+ the following {traits_are} implemented but not in scope, \
+ perhaps add a `use` for {one_of_them}:",
+ traits_are = if candidates.len() == 1 {"trait is"} else {"traits are"},
+ one_of_them = if candidates.len() == 1 {"it"} else {"one of them"});
+
+ err.help(&msg[..]);
+
+ for (i, trait_did) in candidates.iter().enumerate() {
+ err.help(&format!("candidate #{}: `use {}`",
+ i + 1,
+ self.tcx.item_path_str(*trait_did)));
}
+ return
}
- }
-}
-
-pub type AllTraitsVec = Vec<TraitInfo>;
-
-fn suggest_traits_to_import<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- err: &mut DiagnosticBuilder,
- span: Span,
- rcvr_ty: Ty<'tcx>,
- item_name: ast::Name,
- rcvr_expr: Option<&hir::Expr>,
- valid_out_of_scope_traits: Vec<DefId>)
-{
- let tcx = fcx.tcx();
-
- if !valid_out_of_scope_traits.is_empty() {
- let mut candidates = valid_out_of_scope_traits;
- candidates.sort();
- candidates.dedup();
- let msg = format!(
- "items from traits can only be used if the trait is in scope; \
- the following {traits_are} implemented but not in scope, \
- perhaps add a `use` for {one_of_them}:",
- traits_are = if candidates.len() == 1 {"trait is"} else {"traits are"},
- one_of_them = if candidates.len() == 1 {"it"} else {"one of them"});
-
- err.help(&msg[..]);
-
- for (i, trait_did) in candidates.iter().enumerate() {
- err.help(&format!("candidate #{}: `use {}`",
- i + 1,
- fcx.tcx().item_path_str(*trait_did)));
+ let type_is_local = self.type_derefs_to_local(span, rcvr_ty, rcvr_expr);
+
+ // there's no implemented traits, so lets suggest some traits to
+ // implement, by finding ones that have the item name, and are
+ // legal to implement.
+ let mut candidates = all_traits(self.ccx)
+ .filter(|info| {
+ // we approximate the coherence rules to only suggest
+ // traits that are legal to implement by requiring that
+ // either the type or trait is local. Multidispatch means
+ // this isn't perfect (that is, there are cases when
+ // implementing a trait would be legal but is rejected
+ // here).
+ (type_is_local || info.def_id.is_local())
+ && self.trait_item(info.def_id, item_name).is_some()
+ })
+ .collect::<Vec<_>>();
+
+ if !candidates.is_empty() {
+ // sort from most relevant to least relevant
+ candidates.sort_by(|a, b| a.cmp(b).reverse());
+ candidates.dedup();
+
+ // FIXME #21673 this help message could be tuned to the case
+ // of a type parameter: suggest adding a trait bound rather
+ // than implementing.
+ let msg = format!(
+ "items from traits can only be used if the trait is implemented and in scope; \
+ the following {traits_define} an item `{name}`, \
+ perhaps you need to implement {one_of_them}:",
+ traits_define = if candidates.len() == 1 {"trait defines"} else {"traits define"},
+ one_of_them = if candidates.len() == 1 {"it"} else {"one of them"},
+ name = item_name);
+
+ err.help(&msg[..]);
+
+ for (i, trait_info) in candidates.iter().enumerate() {
+ err.help(&format!("candidate #{}: `{}`",
+ i + 1,
+ self.tcx.item_path_str(trait_info.def_id)));
+ }
}
- return
}
- let type_is_local = type_derefs_to_local(fcx, span, rcvr_ty, rcvr_expr);
-
- // there's no implemented traits, so lets suggest some traits to
- // implement, by finding ones that have the item name, and are
- // legal to implement.
- let mut candidates = all_traits(fcx.ccx)
- .filter(|info| {
- // we approximate the coherence rules to only suggest
- // traits that are legal to implement by requiring that
- // either the type or trait is local. Multidispatch means
- // this isn't perfect (that is, there are cases when
- // implementing a trait would be legal but is rejected
- // here).
- (type_is_local || info.def_id.is_local())
- && trait_item(tcx, info.def_id, item_name).is_some()
- })
- .collect::<Vec<_>>();
-
- if !candidates.is_empty() {
- // sort from most relevant to least relevant
- candidates.sort_by(|a, b| a.cmp(b).reverse());
- candidates.dedup();
-
- // FIXME #21673 this help message could be tuned to the case
- // of a type parameter: suggest adding a trait bound rather
- // than implementing.
- let msg = format!(
- "items from traits can only be used if the trait is implemented and in scope; \
- the following {traits_define} an item `{name}`, \
- perhaps you need to implement {one_of_them}:",
- traits_define = if candidates.len() == 1 {"trait defines"} else {"traits define"},
- one_of_them = if candidates.len() == 1 {"it"} else {"one of them"},
- name = item_name);
-
- err.help(&msg[..]);
-
- for (i, trait_info) in candidates.iter().enumerate() {
- err.help(&format!("candidate #{}: `{}`",
- i + 1,
- fcx.tcx().item_path_str(trait_info.def_id)));
+ /// Checks whether there is a local type somewhere in the chain of
+ /// autoderefs of `rcvr_ty`.
+ fn type_derefs_to_local(&self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ rcvr_expr: Option<&hir::Expr>) -> bool {
+ fn is_local(ty: Ty) -> bool {
+ match ty.sty {
+ ty::TyEnum(def, _) | ty::TyStruct(def, _) => def.did.is_local(),
+
+ ty::TyTrait(ref tr) => tr.principal_def_id().is_local(),
+
+ ty::TyParam(_) => true,
+
+ // everything else (primitive types etc.) is effectively
+ // non-local (there are "edge" cases, e.g. (LocalType,), but
+ // the noise from these sort of types is usually just really
+ // annoying, rather than any sort of help).
+ _ => false
+ }
}
- }
-}
-
-/// Checks whether there is a local type somewhere in the chain of
-/// autoderefs of `rcvr_ty`.
-fn type_derefs_to_local<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- rcvr_ty: Ty<'tcx>,
- rcvr_expr: Option<&hir::Expr>) -> bool {
- fn is_local(ty: Ty) -> bool {
- match ty.sty {
- ty::TyEnum(def, _) | ty::TyStruct(def, _) => def.did.is_local(),
-
- ty::TyTrait(ref tr) => tr.principal_def_id().is_local(),
-
- ty::TyParam(_) => true,
- // everything else (primitive types etc.) is effectively
- // non-local (there are "edge" cases, e.g. (LocalType,), but
- // the noise from these sort of types is usually just really
- // annoying, rather than any sort of help).
- _ => false
+ // This occurs for UFCS desugaring of `T::method`, where there is no
+ // receiver expression for the method call, and thus no autoderef.
+ if rcvr_expr.is_none() {
+ return is_local(self.resolve_type_vars_with_obligations(rcvr_ty));
}
- }
- // This occurs for UFCS desugaring of `T::method`, where there is no
- // receiver expression for the method call, and thus no autoderef.
- if rcvr_expr.is_none() {
- return is_local(fcx.resolve_type_vars_if_possible(rcvr_ty));
+ self.autoderef(span, rcvr_ty, || None,
+ check::UnresolvedTypeAction::Ignore, ty::NoPreference,
+ |ty, _| {
+ if is_local(ty) {
+ Some(())
+ } else {
+ None
+ }
+ }).2.is_some()
}
-
- check::autoderef(fcx, span, rcvr_ty, || None,
- check::UnresolvedTypeAction::Ignore, ty::NoPreference,
- |ty, _| {
- if is_local(ty) {
- Some(())
- } else {
- None
- }
- }).2.is_some()
}
+pub type AllTraitsVec = Vec<TraitInfo>;
+
#[derive(Copy, Clone)]
pub struct TraitInfo {
pub def_id: DefId,
pub use self::compare_method::{compare_impl_method, compare_const_impl};
use self::TupleArgumentsFlag::*;
-use astconv::{self, ast_region_to_region, ast_ty_to_ty, AstConv, PathParamMode};
-use check::_match::pat_ctxt;
+use astconv::{AstConv, ast_region_to_region, PathParamMode};
+use check::_match::PatCtxt;
use dep_graph::DepNode;
use fmt_macros::{Parser, Piece, Position};
-use middle::astconv_util::prohibit_type_params;
use middle::cstore::LOCAL_CRATE;
use hir::def::{self, Def};
use hir::def_id::DefId;
-use rustc::infer::{self, InferOk, TypeOrigin, TypeTrace, type_variable};
+use rustc::infer::{self, InferCtxt, InferOk, TypeOrigin, TypeTrace, type_variable};
use hir::pat_util::{self, pat_id_map};
use rustc::ty::subst::{self, Subst, Substs, VecPerParamSpace, ParamSpace};
-use rustc::traits::{self, report_fulfillment_errors, ProjectionMode};
+use rustc::traits::{self, ProjectionMode};
use rustc::ty::{GenericPredicates, TypeScheme};
use rustc::ty::{ParamTy, ParameterEnvironment};
use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue};
use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, Visibility};
use rustc::ty::{MethodCall, MethodCallee};
use rustc::ty::adjustment;
-use rustc::ty::error::TypeError;
use rustc::ty::fold::TypeFoldable;
use rustc::ty::util::{Representability, IntTypeExt};
use require_c_abi_if_variadic;
use std::cell::{Cell, Ref, RefCell};
use std::collections::{HashSet};
use std::mem::replace;
+use std::ops::Deref;
use syntax::abi::Abi;
use syntax::ast;
use syntax::attr;
/// Here, the function `foo()` and the closure passed to
/// `bar()` will each have their own `FnCtxt`, but they will
/// share the inherited fields.
-pub struct Inherited<'a, 'tcx: 'a> {
- infcx: infer::InferCtxt<'a, 'tcx>,
+pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'gcx>,
+ infcx: InferCtxt<'a, 'gcx, 'tcx>,
locals: RefCell<NodeMap<Ty<'tcx>>>,
fulfillment_cx: RefCell<traits::FulfillmentContext<'tcx>>,
- tables: &'a RefCell<ty::Tables<'tcx>>,
-
// When we process a call like `c()` where `c` is a closure type,
// we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
// `FnOnce` closure. In that case, we defer full resolution of the
// decision. We keep these deferred resolutions grouped by the
// def-id of the closure, so that once we decide, we can easily go
// back and process them.
- deferred_call_resolutions: RefCell<DefIdMap<Vec<DeferredCallResolutionHandler<'tcx>>>>,
+ deferred_call_resolutions: RefCell<DefIdMap<Vec<DeferredCallResolutionHandler<'gcx, 'tcx>>>>,
deferred_cast_checks: RefCell<Vec<cast::CastCheck<'tcx>>>,
}
-trait DeferredCallResolution<'tcx> {
- fn resolve<'a>(&mut self, fcx: &FnCtxt<'a,'tcx>);
+impl<'a, 'gcx, 'tcx> Deref for Inherited<'a, 'gcx, 'tcx> {
+ type Target = InferCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.infcx
+ }
+}
+
+trait DeferredCallResolution<'gcx, 'tcx> {
+ fn resolve<'a>(&mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>);
}
-type DeferredCallResolutionHandler<'tcx> = Box<DeferredCallResolution<'tcx>+'tcx>;
+type DeferredCallResolutionHandler<'gcx, 'tcx> = Box<DeferredCallResolution<'gcx, 'tcx>+'tcx>;
/// When type-checking an expression, we propagate downward
/// whatever type hint we are able in the form of an `Expectation`.
ExpectRvalueLikeUnsized(Ty<'tcx>),
}
-impl<'tcx> Expectation<'tcx> {
+impl<'a, 'gcx, 'tcx> Expectation<'tcx> {
// Disregard "castable to" expectations because they
// can lead us astray. Consider for example `if cond
// {22} else {c} as u8` -- if we propagate the
// an expected type. Otherwise, we might write parts of the type
// when checking the 'then' block which are incompatible with the
// 'else' branch.
- fn adjust_for_branches<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+ fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
match *self {
ExpectHasType(ety) => {
- let ety = fcx.infcx().shallow_resolve(ety);
+ let ety = fcx.shallow_resolve(ety);
if !ety.is_ty_var() {
ExpectHasType(ety)
} else {
_ => NoExpectation
}
}
+
+ /// Provide an expectation for an rvalue expression given an *optional*
+ /// hint, which is not required for type safety (the resulting type might
+ /// be checked higher up, as is the case with `&expr` and `box expr`), but
+ /// is useful in determining the concrete type.
+ ///
+ /// The primary use case is where the expected type is a fat pointer,
+ /// like `&[isize]`. For example, consider the following statement:
+ ///
+ /// let x: &[isize] = &[1, 2, 3];
+ ///
+ /// In this case, the expected type for the `&[1, 2, 3]` expression is
+ /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
+ /// expectation `ExpectHasType([isize])`, that would be too strong --
+ /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
+ /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
+ /// to the type `&[isize]`. Therefore, we propagate this more limited hint,
+ /// which still is useful, because it informs integer literals and the like.
+ /// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
+ /// for examples of where this comes up.
+ fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
+ match fcx.tcx.struct_tail(ty).sty {
+ ty::TySlice(_) | ty::TyStr | ty::TyTrait(..) => {
+ ExpectRvalueLikeUnsized(ty)
+ }
+ _ => ExpectHasType(ty)
+ }
+ }
+
+ // Resolves `expected` by a single level if it is a variable. If
+ // there is no expected type or resolution is not possible (e.g.,
+ // no constraints yet present), just returns `None`.
+ fn resolve(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
+ match self {
+ NoExpectation => {
+ NoExpectation
+ }
+ ExpectCastableToType(t) => {
+ ExpectCastableToType(fcx.resolve_type_vars_if_possible(&t))
+ }
+ ExpectHasType(t) => {
+ ExpectHasType(fcx.resolve_type_vars_if_possible(&t))
+ }
+ ExpectRvalueLikeUnsized(t) => {
+ ExpectRvalueLikeUnsized(fcx.resolve_type_vars_if_possible(&t))
+ }
+ }
+ }
+
+ fn to_option(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
+ match self.resolve(fcx) {
+ NoExpectation => None,
+ ExpectCastableToType(ty) |
+ ExpectHasType(ty) |
+ ExpectRvalueLikeUnsized(ty) => Some(ty),
+ }
+ }
+
+ fn only_has_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
+ match self.resolve(fcx) {
+ ExpectHasType(ty) => Some(ty),
+ _ => None
+ }
+ }
}
#[derive(Copy, Clone)]
}
#[derive(Clone)]
-pub struct FnCtxt<'a, 'tcx: 'a> {
+pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
body_id: ast::NodeId,
// This flag is set to true if, during the writeback phase, we encounter
ps: RefCell<UnsafetyState>,
- inh: &'a Inherited<'a, 'tcx>,
+ inh: &'a Inherited<'a, 'gcx, 'tcx>,
+}
+
+impl<'a, 'gcx, 'tcx> Deref for FnCtxt<'a, 'gcx, 'tcx> {
+ type Target = Inherited<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.inh
+ }
+}
- ccx: &'a CrateCtxt<'a, 'tcx>,
+/// Helper type of a temporary returned by ccx.inherited(...).
+/// Necessary because we can't write the following bound:
+/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>).
+pub struct InheritedBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'gcx>,
+ infcx: infer::InferCtxtBuilder<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> Inherited<'a, 'tcx> {
- fn new(tcx: &'a TyCtxt<'tcx>,
- tables: &'a RefCell<ty::Tables<'tcx>>,
- param_env: ty::ParameterEnvironment<'a, 'tcx>)
- -> Inherited<'a, 'tcx> {
-
- Inherited {
- infcx: infer::new_infer_ctxt(tcx, tables, Some(param_env), ProjectionMode::AnyFinal),
- fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()),
- locals: RefCell::new(NodeMap()),
- tables: tables,
- deferred_call_resolutions: RefCell::new(DefIdMap()),
- deferred_cast_checks: RefCell::new(Vec::new()),
+impl<'a, 'gcx, 'tcx> CrateCtxt<'a, 'gcx> {
+ pub fn inherited(&'a self, param_env: Option<ty::ParameterEnvironment<'gcx>>)
+ -> InheritedBuilder<'a, 'gcx, 'tcx> {
+ InheritedBuilder {
+ ccx: self,
+ infcx: self.tcx.infer_ctxt(Some(ty::Tables::empty()),
+ param_env,
+ ProjectionMode::AnyFinal)
}
}
+}
+
+impl<'a, 'gcx, 'tcx> InheritedBuilder<'a, 'gcx, 'tcx> {
+ fn enter<F, R>(&'tcx mut self, f: F) -> R
+ where F: for<'b> FnOnce(Inherited<'b, 'gcx, 'tcx>) -> R
+ {
+ let ccx = self.ccx;
+ self.infcx.enter(|infcx| {
+ f(Inherited {
+ ccx: ccx,
+ infcx: infcx,
+ fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()),
+ locals: RefCell::new(NodeMap()),
+ deferred_call_resolutions: RefCell::new(DefIdMap()),
+ deferred_cast_checks: RefCell::new(Vec::new()),
+ })
+ })
+ }
+}
+impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
fn normalize_associated_types_in<T>(&self,
span: Span,
body_id: ast::NodeId,
-> T
where T : TypeFoldable<'tcx>
{
- assoc::normalize_associated_types_in(&self.infcx,
+ assoc::normalize_associated_types_in(self,
&mut self.fulfillment_cx.borrow_mut(),
span,
body_id,
}
-// Used by check_const and check_enum_variants
-pub fn blank_fn_ctxt<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
- inh: &'a Inherited<'a, 'tcx>,
- rty: ty::FnOutput<'tcx>,
- body_id: ast::NodeId)
- -> FnCtxt<'a, 'tcx> {
- FnCtxt {
- body_id: body_id,
- writeback_errors: Cell::new(false),
- err_count_on_creation: ccx.tcx.sess.err_count(),
- ret_ty: rty,
- ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, 0)),
- inh: inh,
- ccx: ccx
- }
-}
-
-fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
- tables: &'a RefCell<ty::Tables<'tcx>>)
- -> Inherited<'a, 'tcx> {
- // It's kind of a kludge to manufacture a fake function context
- // and statement context, but we might as well do write the code only once
- let param_env = ccx.tcx.empty_parameter_environment();
- Inherited::new(ccx.tcx, &tables, param_env)
-}
-
struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
struct CheckItemBodiesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
drop_trait.for_each_impl(ccx.tcx, |drop_impl_did| {
let _task = ccx.tcx.dep_graph.in_task(DepNode::DropckImpl(drop_impl_did));
if drop_impl_did.is_local() {
- match dropck::check_drop_impl(ccx.tcx, drop_impl_did) {
+ match dropck::check_drop_impl(ccx, drop_impl_did) {
Ok(()) => {}
Err(()) => {
assert!(ccx.tcx.sess.has_errors());
fn_id: ast::NodeId,
fn_span: Span,
raw_fty: Ty<'tcx>,
- param_env: ty::ParameterEnvironment<'a, 'tcx>)
+ param_env: ty::ParameterEnvironment<'tcx>)
{
- match raw_fty.sty {
- ty::TyFnDef(_, _, ref fn_ty) => {
- let tables = RefCell::new(ty::Tables::empty());
- let inh = Inherited::new(ccx.tcx, &tables, param_env);
-
- // Compute the fty from point of view of inside fn.
- let fn_scope = ccx.tcx.region_maps.call_site_extent(fn_id, body.id);
- let fn_sig =
- fn_ty.sig.subst(ccx.tcx, &inh.infcx.parameter_environment.free_substs);
- let fn_sig =
- ccx.tcx.liberate_late_bound_regions(fn_scope, &fn_sig);
- let fn_sig =
- inh.normalize_associated_types_in(body.span,
- body.id,
- &fn_sig);
-
- let fcx = check_fn(ccx, fn_ty.unsafety, fn_id, &fn_sig,
- decl, fn_id, body, &inh);
-
- fcx.select_all_obligations_and_apply_defaults();
- upvar::closure_analyze_fn(&fcx, fn_id, decl, body);
- fcx.select_obligations_where_possible();
- fcx.check_casts();
- fcx.select_all_obligations_or_error(); // Casts can introduce new obligations.
-
- regionck::regionck_fn(&fcx, fn_id, fn_span, decl, body);
- writeback::resolve_type_vars_in_fn(&fcx, decl, body);
- }
+ let fn_ty = match raw_fty.sty {
+ ty::TyFnDef(_, _, f) => f,
_ => span_bug!(body.span, "check_bare_fn: function type expected")
- }
+ };
+
+ ccx.inherited(Some(param_env)).enter(|inh| {
+ // Compute the fty from point of view of inside fn.
+ let fn_scope = inh.tcx.region_maps.call_site_extent(fn_id, body.id);
+ let fn_sig =
+ fn_ty.sig.subst(inh.tcx, &inh.parameter_environment.free_substs);
+ let fn_sig =
+ inh.tcx.liberate_late_bound_regions(fn_scope, &fn_sig);
+ let fn_sig =
+ inh.normalize_associated_types_in(body.span, body.id, &fn_sig);
+
+ let fcx = check_fn(&inh, fn_ty.unsafety, fn_id, &fn_sig, decl, fn_id, body);
+
+ fcx.select_all_obligations_and_apply_defaults();
+ fcx.closure_analyze_fn(body);
+ fcx.select_obligations_where_possible();
+ fcx.check_casts();
+ fcx.select_all_obligations_or_error(); // Casts can introduce new obligations.
+
+ fcx.regionck_fn(fn_id, fn_span, decl, body);
+ fcx.resolve_type_vars_in_fn(decl, body);
+ });
}
-struct GatherLocalsVisitor<'a, 'tcx: 'a> {
- fcx: &'a FnCtxt<'a, 'tcx>
+struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> {
fn assign(&mut self, _span: Span, nid: ast::NodeId, ty_opt: Option<Ty<'tcx>>) -> Ty<'tcx> {
match ty_opt {
None => {
// infer the variable's type
- let var_ty = self.fcx.infcx().next_ty_var();
- self.fcx.inh.locals.borrow_mut().insert(nid, var_ty);
+ let var_ty = self.fcx.next_ty_var();
+ self.fcx.locals.borrow_mut().insert(nid, var_ty);
var_ty
}
Some(typ) => {
// take type that the user specified
- self.fcx.inh.locals.borrow_mut().insert(nid, typ);
+ self.fcx.locals.borrow_mut().insert(nid, typ);
typ
}
}
}
}
-impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> {
// Add explicitly-declared locals.
- fn visit_local(&mut self, local: &'tcx hir::Local) {
+ fn visit_local(&mut self, local: &'gcx hir::Local) {
let o_ty = match local.ty {
Some(ref ty) => Some(self.fcx.to_ty(&ty)),
None => None
self.assign(local.span, local.id, o_ty);
debug!("Local variable {:?} is assigned type {}",
local.pat,
- self.fcx.infcx().ty_to_string(
- self.fcx.inh.locals.borrow().get(&local.id).unwrap().clone()));
+ self.fcx.ty_to_string(
+ self.fcx.locals.borrow().get(&local.id).unwrap().clone()));
intravisit::walk_local(self, local);
}
// Add pattern bindings.
- fn visit_pat(&mut self, p: &'tcx hir::Pat) {
+ fn visit_pat(&mut self, p: &'gcx hir::Pat) {
if let PatKind::Ident(_, ref path1, _) = p.node {
- if pat_util::pat_is_binding(&self.fcx.ccx.tcx.def_map.borrow(), p) {
+ if pat_util::pat_is_binding(&self.fcx.tcx.def_map.borrow(), p) {
let var_ty = self.assign(p.span, p.id, None);
self.fcx.require_type_is_sized(var_ty, p.span,
debug!("Pattern binding {} is assigned to {} with type {:?}",
path1.node,
- self.fcx.infcx().ty_to_string(
- self.fcx.inh.locals.borrow().get(&p.id).unwrap().clone()),
+ self.fcx.ty_to_string(
+ self.fcx.locals.borrow().get(&p.id).unwrap().clone()),
var_ty);
}
}
intravisit::walk_pat(self, p);
}
- fn visit_block(&mut self, b: &'tcx hir::Block) {
+ fn visit_block(&mut self, b: &'gcx hir::Block) {
// non-obvious: the `blk` variable maps to region lb, so
// we have to keep this up-to-date. This
// is... unfortunate. It'd be nice to not need this.
// Since an expr occurs as part of the type fixed size arrays we
// need to record the type for that node
- fn visit_ty(&mut self, t: &'tcx hir::Ty) {
+ fn visit_ty(&mut self, t: &'gcx hir::Ty) {
match t.node {
hir::TyFixedLengthVec(ref ty, ref count_expr) => {
self.visit_ty(&ty);
- check_expr_with_hint(self.fcx, &count_expr, self.fcx.tcx().types.usize);
+ self.fcx.check_expr_with_hint(&count_expr, self.fcx.tcx.types.usize);
}
hir::TyBareFn(ref function_declaration) => {
intravisit::walk_fn_decl_nopat(self, &function_declaration.decl);
}
// Don't descend into the bodies of nested closures
- fn visit_fn(&mut self, _: intravisit::FnKind<'tcx>, _: &'tcx hir::FnDecl,
- _: &'tcx hir::Block, _: Span, _: ast::NodeId) { }
+ fn visit_fn(&mut self, _: intravisit::FnKind<'gcx>, _: &'gcx hir::FnDecl,
+ _: &'gcx hir::Block, _: Span, _: ast::NodeId) { }
}
/// Helper used by check_bare_fn and check_expr_fn. Does the grungy work of checking a function
///
/// * ...
/// * inherited: other fields inherited from the enclosing fn (if any)
-fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
- unsafety: hir::Unsafety,
- unsafety_id: ast::NodeId,
- fn_sig: &ty::FnSig<'tcx>,
- decl: &'tcx hir::FnDecl,
- fn_id: ast::NodeId,
- body: &'tcx hir::Block,
- inherited: &'a Inherited<'a, 'tcx>)
- -> FnCtxt<'a, 'tcx>
+fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>,
+ unsafety: hir::Unsafety,
+ unsafety_id: ast::NodeId,
+ fn_sig: &ty::FnSig<'tcx>,
+ decl: &'gcx hir::FnDecl,
+ fn_id: ast::NodeId,
+ body: &'gcx hir::Block)
+ -> FnCtxt<'a, 'gcx, 'tcx>
{
- let tcx = ccx.tcx;
- let err_count_on_creation = tcx.sess.err_count();
+ let tcx = inherited.tcx;
let arg_tys = &fn_sig.inputs;
let ret_ty = fn_sig.output;
// Create the function context. This is either derived from scratch or,
// in the case of function expressions, based on the outer context.
- let fcx = FnCtxt {
- body_id: body.id,
- writeback_errors: Cell::new(false),
- err_count_on_creation: err_count_on_creation,
- ret_ty: ret_ty,
- ps: RefCell::new(UnsafetyState::function(unsafety, unsafety_id)),
- inh: inherited,
- ccx: ccx
- };
+ let fcx = FnCtxt::new(inherited, ret_ty, body.id);
+ *fcx.ps.borrow_mut() = UnsafetyState::function(unsafety, unsafety_id);
if let ty::FnConverging(ret_ty) = ret_ty {
fcx.require_type_is_sized(ret_ty, decl.output.span(), traits::ReturnType);
});
// Check the pattern.
- let pcx = pat_ctxt {
+ let pcx = PatCtxt {
fcx: &fcx,
map: pat_id_map(&tcx.def_map, &input.pat),
};
- _match::check_pat(&pcx, &input.pat, *arg_ty);
+ pcx.check_pat(&input.pat, *arg_ty);
}
visit.visit_block(body);
}
- check_block_with_expected(&fcx, body, match ret_ty {
+ fcx.check_block_with_expected(body, match ret_ty {
ty::FnConverging(result_type) => ExpectHasType(result_type),
ty::FnDiverging => NoExpectation
});
check_bare_fn(ccx, &sig.decl, body, id, span, fty, param_env);
}
-fn report_forbidden_specialization(tcx: &TyCtxt,
- impl_item: &hir::ImplItem,
- parent_impl: DefId)
+fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ impl_item: &hir::ImplItem,
+ parent_impl: DefId)
{
let mut err = struct_span_err!(
tcx.sess, impl_item.span, E0520,
err.emit();
}
-fn check_specialization_validity<'tcx>(tcx: &TyCtxt<'tcx>, trait_def: &ty::TraitDef<'tcx>,
- impl_id: DefId, impl_item: &hir::ImplItem)
+fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trait_def: &ty::TraitDef<'tcx>,
+ impl_id: DefId,
+ impl_item: &hir::ImplItem)
{
let ancestors = trait_def.ancestors(impl_id);
// Find associated const definition.
if let &ty::ConstTraitItem(ref trait_const) = ty_trait_item {
- compare_const_impl(ccx.tcx,
+ compare_const_impl(ccx,
&impl_const,
impl_item.span,
trait_const,
};
if let &ty::MethodTraitItem(ref trait_method) = ty_trait_item {
- compare_impl_method(ccx.tcx,
+ compare_impl_method(ccx,
&impl_method,
impl_item.span,
body.id,
}
}
-impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
+/// Checks a constant appearing in a type. At the moment this is just the
+/// length expression in a fixed-length vector, but someday it might be
+/// extended to type-level numeric literals.
+fn check_const_in_type<'a,'tcx>(ccx: &'a CrateCtxt<'a,'tcx>,
+ expr: &'tcx hir::Expr,
+ expected_type: Ty<'tcx>) {
+ ccx.inherited(None).enter(|inh| {
+ let fcx = FnCtxt::new(&inh, ty::FnConverging(expected_type), expr.id);
+ fcx.check_const_with_ty(expr.span, expr, expected_type);
+ });
+}
+
+fn check_const<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
+ sp: Span,
+ e: &'tcx hir::Expr,
+ id: ast::NodeId) {
+ ccx.inherited(None).enter(|inh| {
+ let rty = ccx.tcx.node_id_to_type(id);
+ let fcx = FnCtxt::new(&inh, ty::FnConverging(rty), e.id);
+ let declty = fcx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(id)).ty;
+ fcx.check_const_with_ty(sp, e, declty);
+ });
+}
+
+/// Checks whether a type can be represented in memory. In particular, it
+/// identifies types that contain themselves without indirection through a
+/// pointer, which would mean their size is unbounded.
+pub fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ sp: Span,
+ item_id: ast::NodeId,
+ _designation: &str) -> bool {
+ let rty = tcx.node_id_to_type(item_id);
+
+ // Check that it is possible to represent this type. This call identifies
+ // (1) types that contain themselves and (2) types that contain a different
+ // recursive type. It is only necessary to throw an error on those that
+ // contain themselves. For case 2, there must be an inner type that will be
+ // caught by case 1.
+ match rty.is_representable(tcx, sp) {
+ Representability::SelfRecursive => {
+ let item_def_id = tcx.map.local_def_id(item_id);
+ tcx.recursive_type_with_infinite_size_error(item_def_id).emit();
+ return false
+ }
+ Representability::Representable | Representability::ContainsRecursive => (),
+ }
+ return true
+}
+
+pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, id: ast::NodeId) {
+ let t = tcx.node_id_to_type(id);
+ match t.sty {
+ ty::TyStruct(def, substs) => {
+ let fields = &def.struct_variant().fields;
+ if fields.is_empty() {
+ span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
+ return;
+ }
+ let e = fields[0].ty(tcx, substs);
+ if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
+ span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous");
+ return;
+ }
+ match e.sty {
+ ty::TyParam(_) => { /* struct<T>(T, T, T, T) is ok */ }
+ _ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
+ _ => {
+ span_err!(tcx.sess, sp, E0077,
+ "SIMD vector element type should be machine type");
+ return;
+ }
+ }
+ }
+ _ => ()
+ }
+}
+
+#[allow(trivial_numeric_casts)]
+pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
+ sp: Span,
+ vs: &'tcx [hir::Variant],
+ id: ast::NodeId) {
+ let def_id = ccx.tcx.map.local_def_id(id);
+ let hint = *ccx.tcx.lookup_repr_hints(def_id).get(0).unwrap_or(&attr::ReprAny);
+
+ if hint != attr::ReprAny && vs.is_empty() {
+ span_err!(ccx.tcx.sess, sp, E0084,
+ "unsupported representation for zero-variant enum");
+ }
+
+ ccx.inherited(None).enter(|inh| {
+ let rty = ccx.tcx.node_id_to_type(id);
+ let fcx = FnCtxt::new(&inh, ty::FnConverging(rty), id);
+
+ let repr_type_ty = ccx.tcx.enum_repr_type(Some(&hint)).to_ty(ccx.tcx);
+ for v in vs {
+ if let Some(ref e) = v.node.disr_expr {
+ fcx.check_const_with_ty(e.span, e, repr_type_ty);
+ }
+ }
+
+ let def_id = ccx.tcx.map.local_def_id(id);
+
+ let variants = &ccx.tcx.lookup_adt_def(def_id).variants;
+ let mut disr_vals: Vec<ty::Disr> = Vec::new();
+ for (v, variant) in vs.iter().zip(variants.iter()) {
+ let current_disr_val = variant.disr_val;
+
+ // Check for duplicate discriminant values
+ if let Some(i) = disr_vals.iter().position(|&x| x == current_disr_val) {
+ let mut err = struct_span_err!(ccx.tcx.sess, v.span, E0081,
+ "discriminant value `{}` already exists", disr_vals[i]);
+ let variant_i_node_id = ccx.tcx.map.as_local_node_id(variants[i].did).unwrap();
+ span_note!(&mut err, ccx.tcx.map.span(variant_i_node_id),
+ "conflicting discriminant here");
+ err.emit();
+ }
+ disr_vals.push(current_disr_val);
+ }
+ });
+
+ check_representable(ccx.tcx, sp, id, "enum");
+}
+
+impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn get_item_type_scheme(&self, _: Span, id: DefId)
-> Result<ty::TypeScheme<'tcx>, ErrorReported>
}
fn get_free_substs(&self) -> Option<&Substs<'tcx>> {
- Some(&self.inh.infcx.parameter_environment.free_substs)
+ Some(&self.parameter_environment.free_substs)
}
fn get_type_parameter_bounds(&self,
node_id: ast::NodeId)
-> Result<Vec<ty::PolyTraitRef<'tcx>>, ErrorReported>
{
- let def = self.tcx().type_parameter_def(node_id);
- let r = self.inh.infcx.parameter_environment
+ let def = self.tcx.type_parameter_def(node_id);
+ let r = self.parameter_environment
.caller_bounds
.iter()
.filter_map(|predicate| {
assoc_name: ast::Name)
-> bool
{
- let trait_def = self.ccx.tcx.lookup_trait_def(trait_def_id);
+ let trait_def = self.tcx().lookup_trait_def(trait_def_id);
trait_def.associated_type_names.contains(&assoc_name)
}
})
});
- let ty_var = self.infcx().next_ty_var_with_default(default);
+ let ty_var = self.next_ty_var_with_default(default);
// Finally we add the type variable to the substs
match substs {
-> Ty<'tcx>
{
let (trait_ref, _) =
- self.infcx().replace_late_bound_regions_with_fresh_var(
+ self.replace_late_bound_regions_with_fresh_var(
span,
infer::LateBoundRegionConversionTime::AssocTypeProjection(item_name),
&poly_trait_ref);
}
fn set_tainted_by_errors(&self) {
- self.infcx().set_tainted_by_errors()
+ self.infcx.set_tainted_by_errors()
+ }
+}
+
+impl<'a, 'gcx, 'tcx> RegionScope for FnCtxt<'a, 'gcx, 'tcx> {
+ fn object_lifetime_default(&self, span: Span) -> Option<ty::Region> {
+ Some(self.base_object_lifetime_default(span))
+ }
+
+ fn base_object_lifetime_default(&self, span: Span) -> ty::Region {
+ // RFC #599 specifies that object lifetime defaults take
+ // precedence over other defaults. But within a fn body we
+ // don't have a *default* region, rather we use inference to
+ // find the *correct* region, which is strictly more general
+ // (and anyway, within a fn body the right region may not even
+ // be something the user can write explicitly, since it might
+ // be some expression).
+ self.next_region_var(infer::MiscVariable(span))
+ }
+
+ fn anon_regions(&self, span: Span, count: usize)
+ -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
+ Ok((0..count).map(|_| {
+ self.next_region_var(infer::MiscVariable(span))
+ }).collect())
}
}
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
+/// Whether `autoderef` requires types to resolve.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum UnresolvedTypeAction {
+ /// Produce an error and return `TyError` whenever a type cannot
+ /// be resolved (i.e. it is `TyInfer`).
+ Error,
+ /// Go on without emitting any errors, and return the unresolved
+ /// type. Useful for probing, e.g. in coercions.
+ Ignore
+}
+
+/// Controls whether the arguments are tupled. This is used for the call
+/// operator.
+///
+/// Tupling means that all call-side arguments are packed into a tuple and
+/// passed as a single parameter. For example, if tupling is enabled, this
+/// function:
+///
+/// fn f(x: (isize, isize))
+///
+/// Can be called as:
+///
+/// f(1, 2);
+///
+/// Instead of:
+///
+/// f((1, 2));
+#[derive(Clone, Eq, PartialEq)]
+enum TupleArgumentsFlag {
+ DontTupleArguments,
+ TupleArguments,
+}
- pub fn infcx(&self) -> &infer::InferCtxt<'a,'tcx> {
- &self.inh.infcx
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>,
+ rty: ty::FnOutput<'tcx>,
+ body_id: ast::NodeId)
+ -> FnCtxt<'a, 'gcx, 'tcx> {
+ FnCtxt {
+ body_id: body_id,
+ writeback_errors: Cell::new(false),
+ err_count_on_creation: inh.tcx.sess.err_count(),
+ ret_ty: rty,
+ ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, 0)),
+ inh: inh,
+ }
}
- pub fn param_env(&self) -> &ty::ParameterEnvironment<'a,'tcx> {
- &self.inh.infcx.parameter_environment
+ pub fn param_env(&self) -> &ty::ParameterEnvironment<'tcx> {
+ &self.parameter_environment
}
pub fn sess(&self) -> &Session {
- &self.tcx().sess
+ &self.tcx.sess
}
pub fn err_count_since_creation(&self) -> usize {
- self.ccx.tcx.sess.err_count() - self.err_count_on_creation
+ self.tcx.sess.err_count() - self.err_count_on_creation
}
/// Resolves type variables in `ty` if possible. Unlike the infcx
- /// version, this version will also select obligations if it seems
- /// useful, in an effort to get more type information.
- fn resolve_type_vars_if_possible(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
- debug!("resolve_type_vars_if_possible(ty={:?})", ty);
+ /// version (resolve_type_vars_if_possible), this version will
+ /// also select obligations if it seems useful, in an effort
+ /// to get more type information.
+ fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
+ debug!("resolve_type_vars_with_obligations(ty={:?})", ty);
// No TyInfer()? Nothing needs doing.
if !ty.has_infer_types() {
- debug!("resolve_type_vars_if_possible: ty={:?}", ty);
+ debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// If `ty` is a type variable, see whether we already know what it is.
- ty = self.infcx().resolve_type_vars_if_possible(&ty);
+ ty = self.resolve_type_vars_if_possible(&ty);
if !ty.has_infer_types() {
- debug!("resolve_type_vars_if_possible: ty={:?}", ty);
+ debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// indirect dependencies that don't seem worth tracking
// precisely.
self.select_obligations_where_possible();
- ty = self.infcx().resolve_type_vars_if_possible(&ty);
+ ty = self.resolve_type_vars_if_possible(&ty);
- debug!("resolve_type_vars_if_possible: ty={:?}", ty);
+ debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
ty
}
fn record_deferred_call_resolution(&self,
closure_def_id: DefId,
- r: DeferredCallResolutionHandler<'tcx>) {
- let mut deferred_call_resolutions = self.inh.deferred_call_resolutions.borrow_mut();
+ r: DeferredCallResolutionHandler<'gcx, 'tcx>) {
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.entry(closure_def_id).or_insert(vec![]).push(r);
}
fn remove_deferred_call_resolutions(&self,
closure_def_id: DefId)
- -> Vec<DeferredCallResolutionHandler<'tcx>>
+ -> Vec<DeferredCallResolutionHandler<'gcx, 'tcx>>
{
- let mut deferred_call_resolutions = self.inh.deferred_call_resolutions.borrow_mut();
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.remove(&closure_def_id).unwrap_or(Vec::new())
}
}
pub fn local_ty(&self, span: Span, nid: ast::NodeId) -> Ty<'tcx> {
- match self.inh.locals.borrow().get(&nid) {
+ match self.locals.borrow().get(&nid) {
Some(&t) => t,
None => {
- span_err!(self.tcx().sess, span, E0513,
+ span_err!(self.tcx.sess, span, E0513,
"no type for local variable {}",
nid);
- self.tcx().types.err
+ self.tcx.types.err
}
}
}
pub fn write_ty(&self, node_id: ast::NodeId, ty: Ty<'tcx>) {
debug!("write_ty({}, {:?}) in fcx {}",
node_id, ty, self.tag());
- self.inh.tables.borrow_mut().node_types.insert(node_id, ty);
+ self.tables.borrow_mut().node_types.insert(node_id, ty);
}
pub fn write_substs(&self, node_id: ast::NodeId, substs: ty::ItemSubsts<'tcx>) {
substs,
self.tag());
- self.inh.tables.borrow_mut().item_substs.insert(node_id, substs);
+ self.tables.borrow_mut().item_substs.insert(node_id, substs);
}
}
return;
}
- self.inh.tables.borrow_mut().adjustments.insert(node_id, adj);
+ self.tables.borrow_mut().adjustments.insert(node_id, adj);
}
/// Basically whenever we are converting from a type scheme into
-> T
where T : TypeFoldable<'tcx>
{
- let value = value.subst(self.tcx(), substs);
+ let value = value.subst(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &value);
debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}",
value,
let cause = traits::ObligationCause::new(span,
self.body_id,
traits::ObligationCauseCode::MiscObligation);
- self.inh
- .fulfillment_cx
+ self.fulfillment_cx
.borrow_mut()
- .normalize_projection_type(self.infcx(),
+ .normalize_projection_type(self,
ty::ProjectionTy {
trait_ref: trait_ref,
item_name: item_name,
{
debug!("instantiate_type(did={:?}, path={:?})", did, path);
let type_scheme =
- self.tcx().lookup_item_type(did);
+ self.tcx.lookup_item_type(did);
let type_predicates =
- self.tcx().lookup_predicates(did);
- let substs = astconv::ast_path_substs_for_ty(self, self,
+ self.tcx.lookup_predicates(did);
+ let substs = AstConv::ast_path_substs_for_ty(self, self,
path.span,
PathParamMode::Optional,
&type_scheme.generics,
{
let (adt, variant) = match def {
Def::Variant(enum_id, variant_id) => {
- let adt = self.tcx().lookup_adt_def(enum_id);
+ let adt = self.tcx.lookup_adt_def(enum_id);
(adt, adt.variant_with_id(variant_id))
}
Def::Struct(did) | Def::TyAlias(did) => {
- let typ = self.tcx().lookup_item_type(did);
+ let typ = self.tcx.lookup_item_type(did);
if let ty::TyStruct(adt, _) = typ.ty.sty {
(adt, adt.struct_variant())
} else {
}
pub fn write_nil(&self, node_id: ast::NodeId) {
- self.write_ty(node_id, self.tcx().mk_nil());
+ self.write_ty(node_id, self.tcx.mk_nil());
}
pub fn write_error(&self, node_id: ast::NodeId) {
- self.write_ty(node_id, self.tcx().types.err);
+ self.write_ty(node_id, self.tcx.types.err);
}
pub fn require_type_meets(&self,
builtin_bound: ty::BuiltinBound,
cause: traits::ObligationCause<'tcx>)
{
- self.inh.fulfillment_cx.borrow_mut()
- .register_builtin_bound(self.infcx(), ty, builtin_bound, cause);
+ self.fulfillment_cx.borrow_mut()
+ .register_builtin_bound(self, ty, builtin_bound, cause);
}
pub fn register_predicate(&self,
{
debug!("register_predicate({:?})",
obligation);
- self.inh.fulfillment_cx
+ self.fulfillment_cx
.borrow_mut()
- .register_predicate_obligation(self.infcx(), obligation);
+ .register_predicate_obligation(self, obligation);
}
pub fn to_ty(&self, ast_t: &hir::Ty) -> Ty<'tcx> {
- let t = ast_ty_to_ty(self, self, ast_t);
+ let t = AstConv::ast_ty_to_ty(self, self, ast_t);
self.register_wf_obligation(t, ast_t.span, traits::MiscObligation);
t
}
pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> {
- match self.inh.tables.borrow().node_types.get(&ex.id) {
+ match self.tables.borrow().node_types.get(&ex.id) {
Some(&t) => t,
None => {
bug!("no type for expr in fcx {}", self.tag());
-> Ty<'tcx>
{
let raw_ty = self.expr_ty(expr);
- let raw_ty = self.infcx().shallow_resolve(raw_ty);
- let resolve_ty = |ty: Ty<'tcx>| self.infcx().resolve_type_vars_if_possible(&ty);
- raw_ty.adjust(self.tcx(), expr.span, expr.id, adjustment, |method_call| {
- self.inh.tables.borrow().method_map.get(&method_call)
+ let raw_ty = self.shallow_resolve(raw_ty);
+ let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty);
+ raw_ty.adjust(self.tcx, expr.span, expr.id, adjustment, |method_call| {
+ self.tables.borrow().method_map.get(&method_call)
.map(|method| resolve_ty(method.ty))
})
}
pub fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
- match self.inh.tables.borrow().node_types.get(&id) {
+ match self.tables.borrow().node_types.get(&id) {
Some(&t) => t,
- None if self.err_count_since_creation() != 0 => self.tcx().types.err,
+ None if self.err_count_since_creation() != 0 => self.tcx.types.err,
None => {
bug!("no type for node {}: {} in fcx {}",
- id, self.tcx().map.node_to_string(id),
+ id, self.tcx.map.node_to_string(id),
self.tag());
}
}
&tables.item_substs
}
- Ref::map(self.inh.tables.borrow(), project_item_susbts)
+ Ref::map(self.tables.borrow(), project_item_susbts)
}
pub fn opt_node_ty_substs<F>(&self,
f: F) where
F: FnOnce(&ty::ItemSubsts<'tcx>),
{
- match self.inh.tables.borrow().item_substs.get(&id) {
+ match self.tables.borrow().item_substs.get(&id) {
Some(s) => { f(s) }
None => { }
}
}
- pub fn mk_subty(&self,
- a_is_expected: bool,
- origin: TypeOrigin,
- sub: Ty<'tcx>,
- sup: Ty<'tcx>)
- -> Result<(), TypeError<'tcx>> {
- infer::mk_subty(self.infcx(), a_is_expected, origin, sub, sup)
- // FIXME(#32730) propagate obligations
- .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
- }
-
- pub fn mk_eqty(&self,
- a_is_expected: bool,
- origin: TypeOrigin,
- sub: Ty<'tcx>,
- sup: Ty<'tcx>)
- -> Result<(), TypeError<'tcx>> {
- infer::mk_eqty(self.infcx(), a_is_expected, origin, sub, sup)
- // FIXME(#32730) propagate obligations
- .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()))
- }
-
- pub fn mk_subr(&self,
- origin: infer::SubregionOrigin<'tcx>,
- sub: ty::Region,
- sup: ty::Region) {
- infer::mk_subr(self.infcx(), origin, sub, sup)
- }
-
- pub fn type_error_message<M>(&self,
- sp: Span,
- mk_msg: M,
- actual_ty: Ty<'tcx>,
- err: Option<&TypeError<'tcx>>)
- where M: FnOnce(String) -> String,
- {
- self.infcx().type_error_message(sp, mk_msg, actual_ty, err);
- }
-
- pub fn type_error_struct<M>(&self,
- sp: Span,
- mk_msg: M,
- actual_ty: Ty<'tcx>,
- err: Option<&TypeError<'tcx>>)
- -> DiagnosticBuilder<'tcx>
- where M: FnOnce(String) -> String,
- {
- self.infcx().type_error_struct(sp, mk_msg, actual_ty, err)
- }
-
/// Registers an obligation for checking later, during regionck, that the type `ty` must
/// outlive the region `r`.
pub fn register_region_obligation(&self,
region: ty::Region,
cause: traits::ObligationCause<'tcx>)
{
- let mut fulfillment_cx = self.inh.fulfillment_cx.borrow_mut();
+ let mut fulfillment_cx = self.fulfillment_cx.borrow_mut();
fulfillment_cx.register_region_obligation(ty, region, cause);
}
-> Ty<'tcx>
{
self.normalize_associated_types_in(span,
- &field.ty(self.tcx(), substs))
+ &field.ty(self.tcx, substs))
}
fn check_casts(&self) {
- let mut deferred_cast_checks = self.inh.deferred_cast_checks.borrow_mut();
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
for cast in deferred_cast_checks.drain(..) {
cast.check(self);
}
// encountered type-checking errors. Therefore, if we think we saw
// some errors in this function, just resolve all uninstantiated type
// variables to TyError.
- if self.infcx().is_tainted_by_errors() {
- for ty in &self.infcx().unsolved_variables() {
- if let ty::TyInfer(_) = self.infcx().shallow_resolve(ty).sty {
+ if self.is_tainted_by_errors() {
+ for ty in &self.unsolved_variables() {
+ if let ty::TyInfer(_) = self.shallow_resolve(ty).sty {
debug!("default_type_parameters: defaulting `{:?}` to error", ty);
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.err);
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx().types.err);
}
}
return;
}
- for ty in &self.infcx().unsolved_variables() {
- let resolved = self.infcx().resolve_type_vars_if_possible(ty);
- if self.infcx().type_var_diverges(resolved) {
+ for ty in &self.unsolved_variables() {
+ let resolved = self.resolve_type_vars_if_possible(ty);
+ if self.type_var_diverges(resolved) {
debug!("default_type_parameters: defaulting `{:?}` to `()` because it diverges",
resolved);
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.mk_nil());
} else {
- match self.infcx().type_is_unconstrained_numeric(resolved) {
+ match self.type_is_unconstrained_numeric(resolved) {
UnconstrainedInt => {
debug!("default_type_parameters: defaulting `{:?}` to `i32`",
resolved);
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.i32)
},
UnconstrainedFloat => {
debug!("default_type_parameters: defaulting `{:?}` to `f32`",
resolved);
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.f64)
}
Neither => { }
}
}
fn select_all_obligations_and_apply_defaults(&self) {
- if self.tcx().sess.features.borrow().default_type_parameter_fallback {
+ if self.tcx.sess.features.borrow().default_type_parameter_fallback {
self.new_select_all_obligations_and_apply_defaults();
} else {
self.old_select_all_obligations_and_apply_defaults();
// For the time being this errs on the side of being memory wasteful but provides better
// error reporting.
- // let type_variables = self.infcx().type_variables.clone();
+ // let type_variables = self.type_variables.clone();
// There is a possibility that this algorithm will have to run an arbitrary number of times
// to terminate so we bound it by the compiler's recursion limit.
- for _ in 0..self.tcx().sess.recursion_limit.get() {
+ for _ in 0..self.tcx.sess.recursion_limit.get() {
// First we try to solve all obligations, it is possible that the last iteration
// has made it possible to make more progress.
self.select_obligations_where_possible();
let mut conflicts = Vec::new();
// Collect all unsolved type, integral and floating point variables.
- let unsolved_variables = self.inh.infcx.unsolved_variables();
+ let unsolved_variables = self.unsolved_variables();
// We must collect the defaults *before* we do any unification. Because we have
// directly attached defaults to the type variables any unification that occurs
let default_map: FnvHashMap<_, _> =
unsolved_variables
.iter()
- .filter_map(|t| self.infcx().default(t).map(|d| (t, d)))
+ .filter_map(|t| self.default(t).map(|d| (t, d)))
.collect();
let mut unbound_tyvars = HashSet::new();
// variables. We do this so we only apply literal fallback to type
// variables without defaults.
for ty in &unsolved_variables {
- let resolved = self.infcx().resolve_type_vars_if_possible(ty);
- if self.infcx().type_var_diverges(resolved) {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ let resolved = self.resolve_type_vars_if_possible(ty);
+ if self.type_var_diverges(resolved) {
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.mk_nil());
} else {
- match self.infcx().type_is_unconstrained_numeric(resolved) {
+ match self.type_is_unconstrained_numeric(resolved) {
UnconstrainedInt | UnconstrainedFloat => {
unbound_tyvars.insert(resolved);
},
// the type variable with a defined fallback.
for ty in &unsolved_variables {
if let Some(_default) = default_map.get(ty) {
- let resolved = self.infcx().resolve_type_vars_if_possible(ty);
+ let resolved = self.resolve_type_vars_if_possible(ty);
- debug!("select_all_obligations_and_apply_defaults: ty: {:?} with default: {:?}",
+ debug!("select_all_obligations_and_apply_defaults: \
+ ty: {:?} with default: {:?}",
ty, _default);
match resolved.sty {
// for conflicts and correctly report them.
- let _ = self.infcx().commit_if_ok(|_: &infer::CombinedSnapshot| {
+ let _ = self.commit_if_ok(|_: &infer::CombinedSnapshot| {
for ty in &unbound_tyvars {
- if self.infcx().type_var_diverges(ty) {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ if self.type_var_diverges(ty) {
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.mk_nil());
} else {
- match self.infcx().type_is_unconstrained_numeric(ty) {
+ match self.type_is_unconstrained_numeric(ty) {
UnconstrainedInt => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.i32)
},
UnconstrainedFloat => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.f64)
}
Neither => {
if let Some(default) = default_map.get(ty) {
let default = default.clone();
- match infer::mk_eqty(self.infcx(), false,
- TypeOrigin::Misc(default.origin_span),
- ty, default.ty) {
+ match self.eq_types(false,
+ TypeOrigin::Misc(default.origin_span),
+ ty, default.ty) {
Ok(InferOk { obligations, .. }) => {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty())
let conflicting_default =
self.find_conflicting_default(&unbound_tyvars, &default_map, conflict)
.unwrap_or(type_variable::Default {
- ty: self.infcx().next_ty_var(),
+ ty: self.next_ty_var(),
origin_span: codemap::DUMMY_SP,
- def_id: self.tcx().map.local_def_id(0) // what do I put here?
+ def_id: self.tcx.map.local_def_id(0) // what do I put here?
});
// This is to ensure that we eliminate any non-determinism from the error
};
- self.infcx().report_conflicting_default_types(
+ self.report_conflicting_default_types(
first_default.origin_span,
first_default,
second_default)
// We also run this inside a snapshot that never commits so we can do error
// reporting for more than one conflict.
for ty in &unbound_tyvars {
- if self.infcx().type_var_diverges(ty) {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil());
+ if self.type_var_diverges(ty) {
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.mk_nil());
} else {
- match self.infcx().type_is_unconstrained_numeric(ty) {
+ match self.type_is_unconstrained_numeric(ty) {
UnconstrainedInt => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.i32)
},
UnconstrainedFloat => {
- demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64)
+ self.demand_eqtype(codemap::DUMMY_SP, *ty, self.tcx.types.f64)
},
Neither => {
if let Some(default) = default_map.get(ty) {
let default = default.clone();
- match infer::mk_eqty(self.infcx(), false,
- TypeOrigin::Misc(default.origin_span),
- ty, default.ty) {
+ match self.eq_types(false,
+ TypeOrigin::Misc(default.origin_span),
+ ty, default.ty) {
// FIXME(#32730) propagate obligations
Ok(InferOk { obligations, .. }) => assert!(obligations.is_empty()),
Err(_) => {
// upvar inference should have ensured that all deferred call
// resolutions are handled by now.
- assert!(self.inh.deferred_call_resolutions.borrow().is_empty());
- let infcx = self.infcx();
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
self.select_all_obligations_and_apply_defaults();
- let mut fulfillment_cx = self.inh.fulfillment_cx.borrow_mut();
- match fulfillment_cx.select_all_or_error(infcx) {
+ let mut fulfillment_cx = self.fulfillment_cx.borrow_mut();
+ match fulfillment_cx.select_all_or_error(self) {
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(infcx, &errors); }
+ Err(errors) => { self.report_fulfillment_errors(&errors); }
}
- if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(infcx) {
- traits::report_fulfillment_errors_as_warnings(infcx, errors, self.body_id);
+ if let Err(ref errors) = fulfillment_cx.select_rfc1592_obligations(self) {
+ self.report_fulfillment_errors_as_warnings(errors, self.body_id);
}
}
/// Select as many obligations as we can at present.
fn select_obligations_where_possible(&self) {
- match
- self.inh.fulfillment_cx
- .borrow_mut()
- .select_where_possible(self.infcx())
- {
+ match self.fulfillment_cx.borrow_mut().select_where_possible(self) {
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); }
+ Err(errors) => { self.report_fulfillment_errors(&errors); }
}
}
-}
-impl<'a, 'tcx> RegionScope for FnCtxt<'a, 'tcx> {
- fn object_lifetime_default(&self, span: Span) -> Option<ty::Region> {
- Some(self.base_object_lifetime_default(span))
- }
+ /// Executes an autoderef loop for the type `t`. At each step, invokes `should_stop`
+ /// to decide whether to terminate the loop. Returns the final type and number of
+ /// derefs that it performed.
+ ///
+ /// Note: this method does not modify the adjustments table. The caller is responsible for
+ /// inserting an AutoAdjustment record into `self` using one of the suitable methods.
+ pub fn autoderef<'b, E, I, T, F>(&self,
+ sp: Span,
+ base_ty: Ty<'tcx>,
+ maybe_exprs: E,
+ unresolved_type_action: UnresolvedTypeAction,
+ mut lvalue_pref: LvaluePreference,
+ mut should_stop: F)
+ -> (Ty<'tcx>, usize, Option<T>)
+ // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
+ where E: Fn() -> I,
+ I: IntoIterator<Item=&'b hir::Expr>,
+ F: FnMut(Ty<'tcx>, usize) -> Option<T>,
+ {
+ debug!("autoderef(base_ty={:?}, lvalue_pref={:?})",
+ base_ty, lvalue_pref);
+
+ let mut t = base_ty;
+ for autoderefs in 0..self.tcx.sess.recursion_limit.get() {
+ let resolved_t = match unresolved_type_action {
+ UnresolvedTypeAction::Error => {
+ self.structurally_resolved_type(sp, t)
+ }
+ UnresolvedTypeAction::Ignore => {
+ // We can continue even when the type cannot be resolved
+ // (i.e. it is an inference variable) because `Ty::builtin_deref`
+ // and `try_overloaded_deref` both simply return `None`
+ // in such a case without producing spurious errors.
+ self.resolve_type_vars_if_possible(&t)
+ }
+ };
+ if resolved_t.references_error() {
+ return (resolved_t, autoderefs, None);
+ }
- fn base_object_lifetime_default(&self, span: Span) -> ty::Region {
- // RFC #599 specifies that object lifetime defaults take
- // precedence over other defaults. But within a fn body we
- // don't have a *default* region, rather we use inference to
- // find the *correct* region, which is strictly more general
- // (and anyway, within a fn body the right region may not even
- // be something the user can write explicitly, since it might
- // be some expression).
- self.infcx().next_region_var(infer::MiscVariable(span))
- }
-
- fn anon_regions(&self, span: Span, count: usize)
- -> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
- Ok((0..count).map(|_| {
- self.infcx().next_region_var(infer::MiscVariable(span))
- }).collect())
- }
-}
+ match should_stop(resolved_t, autoderefs) {
+ Some(x) => return (resolved_t, autoderefs, Some(x)),
+ None => {}
+ }
-/// Whether `autoderef` requires types to resolve.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum UnresolvedTypeAction {
- /// Produce an error and return `TyError` whenever a type cannot
- /// be resolved (i.e. it is `TyInfer`).
- Error,
- /// Go on without emitting any errors, and return the unresolved
- /// type. Useful for probing, e.g. in coercions.
- Ignore
-}
+ // Otherwise, deref if type is derefable:
+
+ // Super subtle: it might seem as though we should
+ // pass `opt_expr` to `try_overloaded_deref`, so that
+ // the (implicit) autoref of using an overloaded deref
+ // would get added to the adjustment table. However we
+ // do not do that, because it's kind of a
+ // "meta-adjustment" -- instead, we just leave it
+ // unrecorded and know that there "will be" an
+ // autoref. regionck and other bits of the code base,
+ // when they encounter an overloaded autoderef, have
+ // to do some reconstructive surgery. This is a pretty
+ // complex mess that is begging for a proper MIR.
+ let mt = if let Some(mt) = resolved_t.builtin_deref(false, lvalue_pref) {
+ mt
+ } else if let Some(method) = self.try_overloaded_deref(sp, None,
+ resolved_t, lvalue_pref) {
+ for expr in maybe_exprs() {
+ let method_call = MethodCall::autoderef(expr.id, autoderefs as u32);
+ self.tables.borrow_mut().method_map.insert(method_call, method);
+ }
+ self.make_overloaded_lvalue_return_type(method)
+ } else {
+ return (resolved_t, autoderefs, None);
+ };
-/// Executes an autoderef loop for the type `t`. At each step, invokes `should_stop` to decide
-/// whether to terminate the loop. Returns the final type and number of derefs that it performed.
-///
-/// Note: this method does not modify the adjustments table. The caller is responsible for
-/// inserting an AutoAdjustment record into the `fcx` using one of the suitable methods.
-pub fn autoderef<'a, 'b, 'tcx, E, I, T, F>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- base_ty: Ty<'tcx>,
- maybe_exprs: E,
- unresolved_type_action: UnresolvedTypeAction,
- mut lvalue_pref: LvaluePreference,
- mut should_stop: F)
- -> (Ty<'tcx>, usize, Option<T>)
- // FIXME(eddyb) use copyable iterators when that becomes ergonomic.
- where E: Fn() -> I,
- I: IntoIterator<Item=&'b hir::Expr>,
- F: FnMut(Ty<'tcx>, usize) -> Option<T>,
-{
- debug!("autoderef(base_ty={:?}, lvalue_pref={:?})",
- base_ty, lvalue_pref);
-
- let mut t = base_ty;
- for autoderefs in 0..fcx.tcx().sess.recursion_limit.get() {
- let resolved_t = match unresolved_type_action {
- UnresolvedTypeAction::Error => {
- structurally_resolved_type(fcx, sp, t)
+ t = mt.ty;
+ if mt.mutbl == hir::MutImmutable {
+ lvalue_pref = NoPreference;
}
- UnresolvedTypeAction::Ignore => {
- // We can continue even when the type cannot be resolved
- // (i.e. it is an inference variable) because `Ty::builtin_deref`
- // and `try_overloaded_deref` both simply return `None`
- // in such a case without producing spurious errors.
- fcx.infcx().resolve_type_vars_if_possible(&t)
- }
- };
- if resolved_t.references_error() {
- return (resolved_t, autoderefs, None);
}
- match should_stop(resolved_t, autoderefs) {
- Some(x) => return (resolved_t, autoderefs, Some(x)),
- None => {}
- }
+ // We've reached the recursion limit, error gracefully.
+ span_err!(self.tcx.sess, sp, E0055,
+ "reached the recursion limit while auto-dereferencing {:?}",
+ base_ty);
+ (self.tcx.types.err, 0, None)
+ }
- // Otherwise, deref if type is derefable:
-
- // Super subtle: it might seem as though we should
- // pass `opt_expr` to `try_overloaded_deref`, so that
- // the (implicit) autoref of using an overloaded deref
- // would get added to the adjustment table. However we
- // do not do that, because it's kind of a
- // "meta-adjustment" -- instead, we just leave it
- // unrecorded and know that there "will be" an
- // autoref. regionck and other bits of the code base,
- // when they encounter an overloaded autoderef, have
- // to do some reconstructive surgery. This is a pretty
- // complex mess that is begging for a proper MIR.
- let mt = if let Some(mt) = resolved_t.builtin_deref(false, lvalue_pref) {
- mt
- } else if let Some(method) = try_overloaded_deref(fcx, sp, None,
- resolved_t, lvalue_pref) {
- for expr in maybe_exprs() {
- let method_call = MethodCall::autoderef(expr.id, autoderefs as u32);
- fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
+ fn try_overloaded_deref(&self,
+ span: Span,
+ base_expr: Option<&hir::Expr>,
+ base_ty: Ty<'tcx>,
+ lvalue_pref: LvaluePreference)
+ -> Option<MethodCallee<'tcx>>
+ {
+ // Try DerefMut first, if preferred.
+ let method = match (lvalue_pref, self.tcx.lang_items.deref_mut_trait()) {
+ (PreferMutLvalue, Some(trait_did)) => {
+ self.lookup_method_in_trait(span, base_expr,
+ token::intern("deref_mut"), trait_did,
+ base_ty, None)
}
- make_overloaded_lvalue_return_type(fcx.tcx(), method)
- } else {
- return (resolved_t, autoderefs, None);
+ _ => None
};
- t = mt.ty;
- if mt.mutbl == hir::MutImmutable {
- lvalue_pref = NoPreference;
- }
+ // Otherwise, fall back to Deref.
+ let method = match (method, self.tcx.lang_items.deref_trait()) {
+ (None, Some(trait_did)) => {
+ self.lookup_method_in_trait(span, base_expr,
+ token::intern("deref"), trait_did,
+ base_ty, None)
+ }
+ (method, _) => method
+ };
+
+ method
}
- // We've reached the recursion limit, error gracefully.
- span_err!(fcx.tcx().sess, sp, E0055,
- "reached the recursion limit while auto-dereferencing {:?}",
- base_ty);
- (fcx.tcx().types.err, 0, None)
-}
+ /// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait
+ /// returns a type of `&T`, but the actual type we assign to the
+ /// *expression* is `T`. So this function just peels off the return
+ /// type by one layer to yield `T`.
+ fn make_overloaded_lvalue_return_type(&self,
+ method: MethodCallee<'tcx>)
+ -> ty::TypeAndMut<'tcx>
+ {
+ // extract method return type, which will be &T;
+ // all LB regions should have been instantiated during method lookup
+ let ret_ty = method.ty.fn_ret();
+ let ret_ty = self.tcx.no_late_bound_regions(&ret_ty).unwrap().unwrap();
+
+ // method returns &T, but the type as visible to user is T, so deref
+ ret_ty.builtin_deref(true, NoPreference).unwrap()
+ }
+
+ fn lookup_indexing(&self,
+ expr: &hir::Expr,
+ base_expr: &'gcx hir::Expr,
+ base_ty: Ty<'tcx>,
+ idx_ty: Ty<'tcx>,
+ lvalue_pref: LvaluePreference)
+ -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
+ {
+ // FIXME(#18741) -- this is almost but not quite the same as the
+ // autoderef that normal method probing does. They could likely be
+ // consolidated.
+
+ let (ty, autoderefs, final_mt) = self.autoderef(base_expr.span,
+ base_ty,
+ || Some(base_expr),
+ UnresolvedTypeAction::Error,
+ lvalue_pref,
+ |adj_ty, idx| {
+ self.try_index_step(MethodCall::expr(expr.id), expr, base_expr,
+ adj_ty, idx, false, lvalue_pref, idx_ty)
+ });
-fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span,
- base_expr: Option<&hir::Expr>,
- base_ty: Ty<'tcx>,
- lvalue_pref: LvaluePreference)
- -> Option<MethodCallee<'tcx>>
-{
- // Try DerefMut first, if preferred.
- let method = match (lvalue_pref, fcx.tcx().lang_items.deref_mut_trait()) {
- (PreferMutLvalue, Some(trait_did)) => {
- method::lookup_in_trait(fcx, span, base_expr,
- token::intern("deref_mut"), trait_did,
- base_ty, None)
+ if final_mt.is_some() {
+ return final_mt;
}
- _ => None
- };
- // Otherwise, fall back to Deref.
- let method = match (method, fcx.tcx().lang_items.deref_trait()) {
- (None, Some(trait_did)) => {
- method::lookup_in_trait(fcx, span, base_expr,
- token::intern("deref"), trait_did,
- base_ty, None)
+ // After we have fully autoderef'd, if the resulting type is [T; n], then
+ // do a final unsized coercion to yield [T].
+ if let ty::TyArray(element_ty, _) = ty.sty {
+ let adjusted_ty = self.tcx.mk_slice(element_ty);
+ self.try_index_step(MethodCall::expr(expr.id), expr, base_expr,
+ adjusted_ty, autoderefs, true, lvalue_pref, idx_ty)
+ } else {
+ None
}
- (method, _) => method
- };
-
- method
-}
-
-/// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait returns a type of `&T`, but the
-/// actual type we assign to the *expression* is `T`. So this function just peels off the return
-/// type by one layer to yield `T`.
-fn make_overloaded_lvalue_return_type<'tcx>(tcx: &TyCtxt<'tcx>,
- method: MethodCallee<'tcx>)
- -> ty::TypeAndMut<'tcx>
-{
- // extract method return type, which will be &T;
- // all LB regions should have been instantiated during method lookup
- let ret_ty = method.ty.fn_ret();
- let ret_ty = tcx.no_late_bound_regions(&ret_ty).unwrap().unwrap();
-
- // method returns &T, but the type as visible to user is T, so deref
- ret_ty.builtin_deref(true, NoPreference).unwrap()
-}
-
-fn lookup_indexing<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &hir::Expr,
- base_expr: &'tcx hir::Expr,
- base_ty: Ty<'tcx>,
- idx_ty: Ty<'tcx>,
- lvalue_pref: LvaluePreference)
- -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
-{
- // FIXME(#18741) -- this is almost but not quite the same as the
- // autoderef that normal method probing does. They could likely be
- // consolidated.
-
- let (ty, autoderefs, final_mt) = autoderef(fcx,
- base_expr.span,
- base_ty,
- || Some(base_expr),
- UnresolvedTypeAction::Error,
- lvalue_pref,
- |adj_ty, idx| {
- try_index_step(fcx, MethodCall::expr(expr.id), expr, base_expr,
- adj_ty, idx, false, lvalue_pref, idx_ty)
- });
-
- if final_mt.is_some() {
- return final_mt;
}
- // After we have fully autoderef'd, if the resulting type is [T; n], then
- // do a final unsized coercion to yield [T].
- if let ty::TyArray(element_ty, _) = ty.sty {
- let adjusted_ty = fcx.tcx().mk_slice(element_ty);
- try_index_step(fcx, MethodCall::expr(expr.id), expr, base_expr,
- adjusted_ty, autoderefs, true, lvalue_pref, idx_ty)
- } else {
- None
- }
-}
-
-/// To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust)
-/// `base_expr`, looking for a type which either supports builtin indexing or overloaded indexing.
-/// This loop implements one step in that search; the autoderef loop is implemented by
-/// `lookup_indexing`.
-fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- method_call: MethodCall,
- expr: &hir::Expr,
- base_expr: &'tcx hir::Expr,
- adjusted_ty: Ty<'tcx>,
- autoderefs: usize,
- unsize: bool,
- lvalue_pref: LvaluePreference,
- index_ty: Ty<'tcx>)
- -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
-{
- let tcx = fcx.tcx();
- debug!("try_index_step(expr={:?}, base_expr.id={:?}, adjusted_ty={:?}, \
- autoderefs={}, unsize={}, index_ty={:?})",
- expr,
- base_expr,
- adjusted_ty,
- autoderefs,
- unsize,
- index_ty);
-
- let input_ty = fcx.infcx().next_ty_var();
-
- // First, try built-in indexing.
- match (adjusted_ty.builtin_index(), &index_ty.sty) {
- (Some(ty), &ty::TyUint(ast::UintTy::Us)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => {
- debug!("try_index_step: success, using built-in indexing");
- // If we had `[T; N]`, we should've caught it before unsizing to `[T]`.
- assert!(!unsize);
- fcx.write_autoderef_adjustment(base_expr.id, autoderefs);
- return Some((tcx.types.usize, ty));
- }
- _ => {}
- }
-
- // Try `IndexMut` first, if preferred.
- let method = match (lvalue_pref, tcx.lang_items.index_mut_trait()) {
- (PreferMutLvalue, Some(trait_did)) => {
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&base_expr),
- token::intern("index_mut"),
- trait_did,
- autoderefs,
- unsize,
- adjusted_ty,
- Some(vec![input_ty]))
- }
- _ => None,
- };
-
- // Otherwise, fall back to `Index`.
- let method = match (method, tcx.lang_items.index_trait()) {
- (None, Some(trait_did)) => {
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&base_expr),
- token::intern("index"),
- trait_did,
- autoderefs,
- unsize,
- adjusted_ty,
- Some(vec![input_ty]))
+ /// To type-check `base_expr[index_expr]`, we progressively autoderef
+ /// (and otherwise adjust) `base_expr`, looking for a type which either
+ /// supports builtin indexing or overloaded indexing.
+ /// This loop implements one step in that search; the autoderef loop
+ /// is implemented by `lookup_indexing`.
+ fn try_index_step(&self,
+ method_call: MethodCall,
+ expr: &hir::Expr,
+ base_expr: &'gcx hir::Expr,
+ adjusted_ty: Ty<'tcx>,
+ autoderefs: usize,
+ unsize: bool,
+ lvalue_pref: LvaluePreference,
+ index_ty: Ty<'tcx>)
+ -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
+ {
+ let tcx = self.tcx;
+ debug!("try_index_step(expr={:?}, base_expr.id={:?}, adjusted_ty={:?}, \
+ autoderefs={}, unsize={}, index_ty={:?})",
+ expr,
+ base_expr,
+ adjusted_ty,
+ autoderefs,
+ unsize,
+ index_ty);
+
+ let input_ty = self.next_ty_var();
+
+ // First, try built-in indexing.
+ match (adjusted_ty.builtin_index(), &index_ty.sty) {
+ (Some(ty), &ty::TyUint(ast::UintTy::Us)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => {
+ debug!("try_index_step: success, using built-in indexing");
+ // If we had `[T; N]`, we should've caught it before unsizing to `[T]`.
+ assert!(!unsize);
+ self.write_autoderef_adjustment(base_expr.id, autoderefs);
+ return Some((tcx.types.usize, ty));
+ }
+ _ => {}
}
- (method, _) => method,
- };
- // If some lookup succeeds, write callee into table and extract index/element
- // type from the method signature.
- // If some lookup succeeded, install method in table
- method.map(|method| {
- debug!("try_index_step: success, using overloaded indexing");
- fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
- (input_ty, make_overloaded_lvalue_return_type(fcx.tcx(), method).ty)
- })
-}
-
-fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- method_fn_ty: Ty<'tcx>,
- callee_expr: &'tcx hir::Expr,
- args_no_rcvr: &'tcx [P<hir::Expr>],
- tuple_arguments: TupleArgumentsFlag,
- expected: Expectation<'tcx>)
- -> ty::FnOutput<'tcx> {
- if method_fn_ty.references_error() {
- let err_inputs = err_args(fcx.tcx(), args_no_rcvr.len());
-
- let err_inputs = match tuple_arguments {
- DontTupleArguments => err_inputs,
- TupleArguments => vec![fcx.tcx().mk_tup(err_inputs)],
+ // Try `IndexMut` first, if preferred.
+ let method = match (lvalue_pref, tcx.lang_items.index_mut_trait()) {
+ (PreferMutLvalue, Some(trait_did)) => {
+ self.lookup_method_in_trait_adjusted(expr.span,
+ Some(&base_expr),
+ token::intern("index_mut"),
+ trait_did,
+ autoderefs,
+ unsize,
+ adjusted_ty,
+ Some(vec![input_ty]))
+ }
+ _ => None,
};
- check_argument_types(fcx,
- sp,
- &err_inputs[..],
- &[],
- args_no_rcvr,
- false,
- tuple_arguments);
- ty::FnConverging(fcx.tcx().types.err)
- } else {
- match method_fn_ty.sty {
- ty::TyFnDef(_, _, ref fty) => {
- // HACK(eddyb) ignore self in the definition (see above).
- let expected_arg_tys = expected_types_for_fn_args(fcx,
- sp,
- expected,
- fty.sig.0.output,
- &fty.sig.0.inputs[1..]);
- check_argument_types(fcx,
- sp,
- &fty.sig.0.inputs[1..],
- &expected_arg_tys[..],
- args_no_rcvr,
- fty.sig.0.variadic,
- tuple_arguments);
- fty.sig.0.output
+ // Otherwise, fall back to `Index`.
+ let method = match (method, tcx.lang_items.index_trait()) {
+ (None, Some(trait_did)) => {
+ self.lookup_method_in_trait_adjusted(expr.span,
+ Some(&base_expr),
+ token::intern("index"),
+ trait_did,
+ autoderefs,
+ unsize,
+ adjusted_ty,
+ Some(vec![input_ty]))
}
- _ => {
- span_bug!(callee_expr.span, "method without bare fn type");
+ (method, _) => method,
+ };
+
+ // If some lookup succeeds, write callee into table and extract index/element
+ // type from the method signature.
+ // If some lookup succeeded, install method in table
+ method.map(|method| {
+ debug!("try_index_step: success, using overloaded indexing");
+ self.tables.borrow_mut().method_map.insert(method_call, method);
+ (input_ty, self.make_overloaded_lvalue_return_type(method).ty)
+ })
+ }
+
+ fn check_method_argument_types(&self,
+ sp: Span,
+ method_fn_ty: Ty<'tcx>,
+ callee_expr: &'gcx hir::Expr,
+ args_no_rcvr: &'gcx [P<hir::Expr>],
+ tuple_arguments: TupleArgumentsFlag,
+ expected: Expectation<'tcx>)
+ -> ty::FnOutput<'tcx> {
+ if method_fn_ty.references_error() {
+ let err_inputs = self.err_args(args_no_rcvr.len());
+
+ let err_inputs = match tuple_arguments {
+ DontTupleArguments => err_inputs,
+ TupleArguments => vec![self.tcx.mk_tup(err_inputs)],
+ };
+
+ self.check_argument_types(sp, &err_inputs[..], &[], args_no_rcvr,
+ false, tuple_arguments);
+ ty::FnConverging(self.tcx.types.err)
+ } else {
+ match method_fn_ty.sty {
+ ty::TyFnDef(_, _, ref fty) => {
+ // HACK(eddyb) ignore self in the definition (see above).
+ let expected_arg_tys = self.expected_types_for_fn_args(sp, expected,
+ fty.sig.0.output,
+ &fty.sig.0.inputs[1..]);
+ self.check_argument_types(sp, &fty.sig.0.inputs[1..], &expected_arg_tys[..],
+ args_no_rcvr, fty.sig.0.variadic, tuple_arguments);
+ fty.sig.0.output
+ }
+ _ => {
+ span_bug!(callee_expr.span, "method without bare fn type");
+ }
}
}
}
-}
-/// Generic function that factors out common logic from function calls, method calls and overloaded
-/// operators.
-fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- fn_inputs: &[Ty<'tcx>],
- expected_arg_tys: &[Ty<'tcx>],
- args: &'tcx [P<hir::Expr>],
- variadic: bool,
- tuple_arguments: TupleArgumentsFlag) {
- let tcx = fcx.ccx.tcx;
-
- // Grab the argument types, supplying fresh type variables
- // if the wrong number of arguments were supplied
- let supplied_arg_count = if tuple_arguments == DontTupleArguments {
- args.len()
- } else {
- 1
- };
+ /// Generic function that factors out common logic from function calls,
+ /// method calls and overloaded operators.
+ fn check_argument_types(&self,
+ sp: Span,
+ fn_inputs: &[Ty<'tcx>],
+ expected_arg_tys: &[Ty<'tcx>],
+ args: &'gcx [P<hir::Expr>],
+ variadic: bool,
+ tuple_arguments: TupleArgumentsFlag) {
+ let tcx = self.tcx;
+
+ // Grab the argument types, supplying fresh type variables
+ // if the wrong number of arguments were supplied
+ let supplied_arg_count = if tuple_arguments == DontTupleArguments {
+ args.len()
+ } else {
+ 1
+ };
- // All the input types from the fn signature must outlive the call
- // so as to validate implied bounds.
- for &fn_input_ty in fn_inputs {
- fcx.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation);
- }
-
- let mut expected_arg_tys = expected_arg_tys;
- let expected_arg_count = fn_inputs.len();
- let formal_tys = if tuple_arguments == TupleArguments {
- let tuple_type = structurally_resolved_type(fcx, sp, fn_inputs[0]);
- match tuple_type.sty {
- ty::TyTuple(ref arg_types) => {
- if arg_types.len() != args.len() {
- span_err!(tcx.sess, sp, E0057,
- "this function takes {} parameter{} but {} parameter{} supplied",
- arg_types.len(),
- if arg_types.len() == 1 {""} else {"s"},
- args.len(),
- if args.len() == 1 {" was"} else {"s were"});
+ // All the input types from the fn signature must outlive the call
+ // so as to validate implied bounds.
+ for &fn_input_ty in fn_inputs {
+ self.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation);
+ }
+
+ let mut expected_arg_tys = expected_arg_tys;
+ let expected_arg_count = fn_inputs.len();
+ let formal_tys = if tuple_arguments == TupleArguments {
+ let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
+ match tuple_type.sty {
+ ty::TyTuple(arg_types) => {
+ if arg_types.len() != args.len() {
+ span_err!(tcx.sess, sp, E0057,
+ "this function takes {} parameter{} but {} parameter{} supplied",
+ arg_types.len(),
+ if arg_types.len() == 1 {""} else {"s"},
+ args.len(),
+ if args.len() == 1 {" was"} else {"s were"});
+ expected_arg_tys = &[];
+ self.err_args(args.len())
+ } else {
+ expected_arg_tys = match expected_arg_tys.get(0) {
+ Some(&ty) => match ty.sty {
+ ty::TyTuple(ref tys) => &tys,
+ _ => &[]
+ },
+ None => &[]
+ };
+ arg_types.to_vec()
+ }
+ }
+ _ => {
+ span_err!(tcx.sess, sp, E0059,
+ "cannot use call notation; the first type parameter \
+ for the function trait is neither a tuple nor unit");
expected_arg_tys = &[];
- err_args(fcx.tcx(), args.len())
- } else {
- expected_arg_tys = match expected_arg_tys.get(0) {
- Some(&ty) => match ty.sty {
- ty::TyTuple(ref tys) => &tys,
- _ => &[]
- },
- None => &[]
- };
- (*arg_types).clone()
+ self.err_args(args.len())
}
}
- _ => {
- span_err!(tcx.sess, sp, E0059,
- "cannot use call notation; the first type parameter \
- for the function trait is neither a tuple nor unit");
+ } else if expected_arg_count == supplied_arg_count {
+ fn_inputs.to_vec()
+ } else if variadic {
+ if supplied_arg_count >= expected_arg_count {
+ fn_inputs.to_vec()
+ } else {
+ span_err!(tcx.sess, sp, E0060,
+ "this function takes at least {} parameter{} \
+ but {} parameter{} supplied",
+ expected_arg_count,
+ if expected_arg_count == 1 {""} else {"s"},
+ supplied_arg_count,
+ if supplied_arg_count == 1 {" was"} else {"s were"});
expected_arg_tys = &[];
- err_args(fcx.tcx(), args.len())
+ self.err_args(supplied_arg_count)
}
- }
- } else if expected_arg_count == supplied_arg_count {
- fn_inputs.to_vec()
- } else if variadic {
- if supplied_arg_count >= expected_arg_count {
- fn_inputs.to_vec()
} else {
- span_err!(tcx.sess, sp, E0060,
- "this function takes at least {} parameter{} \
- but {} parameter{} supplied",
+ span_err!(tcx.sess, sp, E0061,
+ "this function takes {} parameter{} but {} parameter{} supplied",
expected_arg_count,
if expected_arg_count == 1 {""} else {"s"},
supplied_arg_count,
if supplied_arg_count == 1 {" was"} else {"s were"});
expected_arg_tys = &[];
- err_args(fcx.tcx(), supplied_arg_count)
- }
- } else {
- span_err!(tcx.sess, sp, E0061,
- "this function takes {} parameter{} but {} parameter{} supplied",
- expected_arg_count,
- if expected_arg_count == 1 {""} else {"s"},
- supplied_arg_count,
- if supplied_arg_count == 1 {" was"} else {"s were"});
- expected_arg_tys = &[];
- err_args(fcx.tcx(), supplied_arg_count)
- };
-
- debug!("check_argument_types: formal_tys={:?}",
- formal_tys.iter().map(|t| fcx.infcx().ty_to_string(*t)).collect::<Vec<String>>());
-
- // Check the arguments.
- // We do this in a pretty awful way: first we typecheck any arguments
- // that are not anonymous functions, then we typecheck the anonymous
- // functions. This is so that we have more information about the types
- // of arguments when we typecheck the functions. This isn't really the
- // right way to do this.
- let xs = [false, true];
- let mut any_diverges = false; // has any of the arguments diverged?
- let mut warned = false; // have we already warned about unreachable code?
- for check_blocks in &xs {
- let check_blocks = *check_blocks;
- debug!("check_blocks={}", check_blocks);
-
- // More awful hacks: before we check argument types, try to do
- // an "opportunistic" vtable resolution of any trait bounds on
- // the call. This helps coercions.
- if check_blocks {
- fcx.select_obligations_where_possible();
- }
-
- // For variadic functions, we don't have a declared type for all of
- // the arguments hence we only do our usual type checking with
- // the arguments who's types we do know.
- let t = if variadic {
- expected_arg_count
- } else if tuple_arguments == TupleArguments {
- args.len()
- } else {
- supplied_arg_count
+ self.err_args(supplied_arg_count)
};
- for (i, arg) in args.iter().take(t).enumerate() {
- if any_diverges && !warned {
- fcx.ccx
- .tcx
- .sess
- .add_lint(lint::builtin::UNREACHABLE_CODE,
- arg.id,
- arg.span,
- "unreachable expression".to_string());
- warned = true;
+
+ debug!("check_argument_types: formal_tys={:?}",
+ formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>());
+
+ // Check the arguments.
+ // We do this in a pretty awful way: first we typecheck any arguments
+ // that are not anonymous functions, then we typecheck the anonymous
+ // functions. This is so that we have more information about the types
+ // of arguments when we typecheck the functions. This isn't really the
+ // right way to do this.
+ let xs = [false, true];
+ let mut any_diverges = false; // has any of the arguments diverged?
+ let mut warned = false; // have we already warned about unreachable code?
+ for check_blocks in &xs {
+ let check_blocks = *check_blocks;
+ debug!("check_blocks={}", check_blocks);
+
+ // More awful hacks: before we check argument types, try to do
+ // an "opportunistic" vtable resolution of any trait bounds on
+ // the call. This helps coercions.
+ if check_blocks {
+ self.select_obligations_where_possible();
}
- let is_block = match arg.node {
- hir::ExprClosure(..) => true,
- _ => false
+
+ // For variadic functions, we don't have a declared type for all of
+ // the arguments hence we only do our usual type checking with
+ // the arguments who's types we do know.
+ let t = if variadic {
+ expected_arg_count
+ } else if tuple_arguments == TupleArguments {
+ args.len()
+ } else {
+ supplied_arg_count
};
+ for (i, arg) in args.iter().take(t).enumerate() {
+ if any_diverges && !warned {
+ self.tcx
+ .sess
+ .add_lint(lint::builtin::UNREACHABLE_CODE,
+ arg.id,
+ arg.span,
+ "unreachable expression".to_string());
+ warned = true;
+ }
+ let is_block = match arg.node {
+ hir::ExprClosure(..) => true,
+ _ => false
+ };
- if is_block == check_blocks {
- debug!("checking the argument");
- let formal_ty = formal_tys[i];
+ if is_block == check_blocks {
+ debug!("checking the argument");
+ let formal_ty = formal_tys[i];
- // The special-cased logic below has three functions:
- // 1. Provide as good of an expected type as possible.
- let expected = expected_arg_tys.get(i).map(|&ty| {
- Expectation::rvalue_hint(fcx.tcx(), ty)
- });
+ // The special-cased logic below has three functions:
+ // 1. Provide as good of an expected type as possible.
+ let expected = expected_arg_tys.get(i).map(|&ty| {
+ Expectation::rvalue_hint(self, ty)
+ });
- check_expr_with_expectation(fcx, &arg,
- expected.unwrap_or(ExpectHasType(formal_ty)));
- // 2. Coerce to the most detailed type that could be coerced
- // to, which is `expected_ty` if `rvalue_hint` returns an
- // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
- let coerce_ty = expected.and_then(|e| e.only_has_type(fcx));
- demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &arg);
-
- // 3. Relate the expected type and the formal one,
- // if the expected type was used for the coercion.
- coerce_ty.map(|ty| demand::suptype(fcx, arg.span, formal_ty, ty));
- }
+ self.check_expr_with_expectation(&arg,
+ expected.unwrap_or(ExpectHasType(formal_ty)));
+ // 2. Coerce to the most detailed type that could be coerced
+ // to, which is `expected_ty` if `rvalue_hint` returns an
+ // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+ let coerce_ty = expected.and_then(|e| e.only_has_type(self));
+ self.demand_coerce(&arg, coerce_ty.unwrap_or(formal_ty));
+
+ // 3. Relate the expected type and the formal one,
+ // if the expected type was used for the coercion.
+ coerce_ty.map(|ty| self.demand_suptype(arg.span, formal_ty, ty));
+ }
- if let Some(&arg_ty) = fcx.inh.tables.borrow().node_types.get(&arg.id) {
- any_diverges = any_diverges || fcx.infcx().type_var_diverges(arg_ty);
+ if let Some(&arg_ty) = self.tables.borrow().node_types.get(&arg.id) {
+ any_diverges = any_diverges || self.type_var_diverges(arg_ty);
+ }
}
- }
- if any_diverges && !warned {
- let parent = fcx.ccx.tcx.map.get_parent_node(args[0].id);
- fcx.ccx
- .tcx
- .sess
- .add_lint(lint::builtin::UNREACHABLE_CODE,
- parent,
- sp,
- "unreachable call".to_string());
- warned = true;
+ if any_diverges && !warned {
+ let parent = self.tcx.map.get_parent_node(args[0].id);
+ self.tcx
+ .sess
+ .add_lint(lint::builtin::UNREACHABLE_CODE,
+ parent,
+ sp,
+ "unreachable call".to_string());
+ warned = true;
+ }
+
}
- }
+ // We also need to make sure we at least write the ty of the other
+ // arguments which we skipped above.
+ if variadic {
+ for arg in args.iter().skip(expected_arg_count) {
+ self.check_expr(&arg);
- // We also need to make sure we at least write the ty of the other
- // arguments which we skipped above.
- if variadic {
- for arg in args.iter().skip(expected_arg_count) {
- check_expr(fcx, &arg);
-
- // There are a few types which get autopromoted when passed via varargs
- // in C but we just error out instead and require explicit casts.
- let arg_ty = structurally_resolved_type(fcx, arg.span,
- fcx.expr_ty(&arg));
- match arg_ty.sty {
- ty::TyFloat(ast::FloatTy::F32) => {
- fcx.type_error_message(arg.span,
- |t| {
- format!("can't pass an `{}` to variadic \
- function, cast to `c_double`", t)
- }, arg_ty, None);
- }
- ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => {
- fcx.type_error_message(arg.span, |t| {
- format!("can't pass `{}` to variadic \
- function, cast to `c_int`",
- t)
- }, arg_ty, None);
- }
- ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => {
- fcx.type_error_message(arg.span, |t| {
- format!("can't pass `{}` to variadic \
- function, cast to `c_uint`",
- t)
- }, arg_ty, None);
- }
- ty::TyFnDef(_, _, f) => {
- let ptr_ty = fcx.tcx().mk_ty(ty::TyFnPtr(f));
- let ptr_ty = fcx.infcx().resolve_type_vars_if_possible(&ptr_ty);
- fcx.type_error_message(arg.span,
- |t| {
- format!("can't pass `{}` to variadic \
- function, cast to `{}`", t, ptr_ty)
- }, arg_ty, None);
+ // There are a few types which get autopromoted when passed via varargs
+ // in C but we just error out instead and require explicit casts.
+ let arg_ty = self.structurally_resolved_type(arg.span,
+ self.expr_ty(&arg));
+ match arg_ty.sty {
+ ty::TyFloat(ast::FloatTy::F32) => {
+ self.type_error_message(arg.span, |t| {
+ format!("can't pass an `{}` to variadic \
+ function, cast to `c_double`", t)
+ }, arg_ty, None);
+ }
+ ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => {
+ self.type_error_message(arg.span, |t| {
+ format!("can't pass `{}` to variadic \
+ function, cast to `c_int`",
+ t)
+ }, arg_ty, None);
+ }
+ ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => {
+ self.type_error_message(arg.span, |t| {
+ format!("can't pass `{}` to variadic \
+ function, cast to `c_uint`",
+ t)
+ }, arg_ty, None);
+ }
+ ty::TyFnDef(_, _, f) => {
+ let ptr_ty = self.tcx.mk_fn_ptr(f);
+ let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty);
+ self.type_error_message(arg.span,
+ |t| {
+ format!("can't pass `{}` to variadic \
+ function, cast to `{}`", t, ptr_ty)
+ }, arg_ty, None);
+ }
+ _ => {}
}
- _ => {}
}
}
}
-}
-
-// FIXME(#17596) Ty<'tcx> is incorrectly invariant w.r.t 'tcx.
-fn err_args<'tcx>(tcx: &TyCtxt<'tcx>, len: usize) -> Vec<Ty<'tcx>> {
- (0..len).map(|_| tcx.types.err).collect()
-}
-
-fn write_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_expr: &hir::Expr,
- output: ty::FnOutput<'tcx>) {
- fcx.write_ty(call_expr.id, match output {
- ty::FnConverging(output_ty) => output_ty,
- ty::FnDiverging => fcx.infcx().next_diverging_ty_var()
- });
-}
-// AST fragment checking
-fn check_lit<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- lit: &ast::Lit,
- expected: Expectation<'tcx>)
- -> Ty<'tcx>
-{
- let tcx = fcx.ccx.tcx;
-
- match lit.node {
- ast::LitKind::Str(..) => tcx.mk_static_str(),
- ast::LitKind::ByteStr(ref v) => {
- tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
- tcx.mk_array(tcx.types.u8, v.len()))
- }
- ast::LitKind::Byte(_) => tcx.types.u8,
- ast::LitKind::Char(_) => tcx.types.char,
- ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
- ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
- ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
- let opt_ty = expected.to_option(fcx).and_then(|ty| {
- match ty.sty {
- ty::TyInt(_) | ty::TyUint(_) => Some(ty),
- ty::TyChar => Some(tcx.types.u8),
- ty::TyRawPtr(..) => Some(tcx.types.usize),
- ty::TyFnDef(..) | ty::TyFnPtr(_) => Some(tcx.types.usize),
- _ => None
- }
- });
- opt_ty.unwrap_or_else(
- || tcx.mk_int_var(fcx.infcx().next_int_var_id()))
- }
- ast::LitKind::Float(_, t) => tcx.mk_mach_float(t),
- ast::LitKind::FloatUnsuffixed(_) => {
- let opt_ty = expected.to_option(fcx).and_then(|ty| {
- match ty.sty {
- ty::TyFloat(_) => Some(ty),
- _ => None
- }
- });
- opt_ty.unwrap_or_else(
- || tcx.mk_float_var(fcx.infcx().next_float_var_id()))
- }
- ast::LitKind::Bool(_) => tcx.types.bool
+ fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
+ (0..len).map(|_| self.tcx.types.err).collect()
}
-}
-
-fn check_expr_eq_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expected: Ty<'tcx>) {
- check_expr_with_hint(fcx, expr, expected);
- demand::eqtype(fcx, expr.span, expected, fcx.expr_ty(expr));
-}
-
-pub fn check_expr_has_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expected: Ty<'tcx>) {
- check_expr_with_hint(fcx, expr, expected);
- demand::suptype(fcx, expr.span, expected, fcx.expr_ty(expr));
-}
-
-fn check_expr_coercable_to_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expected: Ty<'tcx>) {
- check_expr_with_hint(fcx, expr, expected);
- demand::coerce(fcx, expr.span, expected, expr);
-}
-
-fn check_expr_with_hint<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, expr: &'tcx hir::Expr,
- expected: Ty<'tcx>) {
- check_expr_with_expectation(fcx, expr, ExpectHasType(expected))
-}
-
-fn check_expr_with_expectation<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expected: Expectation<'tcx>) {
- check_expr_with_expectation_and_lvalue_pref(fcx, expr, expected, NoPreference)
-}
-
-fn check_expr<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr) {
- check_expr_with_expectation(fcx, expr, NoExpectation)
-}
-
-fn check_expr_with_lvalue_pref<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr,
- lvalue_pref: LvaluePreference) {
- check_expr_with_expectation_and_lvalue_pref(fcx, expr, NoExpectation, lvalue_pref)
-}
-
-// determine the `self` type, using fresh variables for all variables
-// declared on the impl declaration e.g., `impl<A,B> for Vec<(A,B)>`
-// would return ($0, $1) where $0 and $1 are freshly instantiated type
-// variables.
-pub fn impl_self_ty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- span: Span, // (potential) receiver for this impl
- did: DefId)
- -> TypeAndSubsts<'tcx> {
- let tcx = fcx.tcx();
-
- let ity = tcx.lookup_item_type(did);
- let (tps, rps, raw_ty) =
- (ity.generics.types.get_slice(subst::TypeSpace),
- ity.generics.regions.get_slice(subst::TypeSpace),
- ity.ty);
-
- debug!("impl_self_ty: tps={:?} rps={:?} raw_ty={:?}", tps, rps, raw_ty);
-
- let rps = fcx.inh.infcx.region_vars_for_defs(span, rps);
- let mut substs = subst::Substs::new(
- VecPerParamSpace::empty(),
- VecPerParamSpace::new(rps, Vec::new(), Vec::new()));
- fcx.inh.infcx.type_vars_for_defs(span, ParamSpace::TypeSpace, &mut substs, tps);
- let substd_ty = fcx.instantiate_type_scheme(span, &substs, &raw_ty);
-
- TypeAndSubsts { substs: substs, ty: substd_ty }
-}
-/// Controls whether the arguments are tupled. This is used for the call
-/// operator.
-///
-/// Tupling means that all call-side arguments are packed into a tuple and
-/// passed as a single parameter. For example, if tupling is enabled, this
-/// function:
-///
-/// fn f(x: (isize, isize))
-///
-/// Can be called as:
-///
-/// f(1, 2);
-///
-/// Instead of:
-///
-/// f((1, 2));
-#[derive(Clone, Eq, PartialEq)]
-enum TupleArgumentsFlag {
- DontTupleArguments,
- TupleArguments,
-}
+ fn write_call(&self,
+ call_expr: &hir::Expr,
+ output: ty::FnOutput<'tcx>) {
+ self.write_ty(call_expr.id, match output {
+ ty::FnConverging(output_ty) => output_ty,
+ ty::FnDiverging => self.next_diverging_ty_var()
+ });
+ }
-/// Unifies the return type with the expected type early, for more coercions
-/// and forward type information on the argument expressions.
-fn expected_types_for_fn_args<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- call_span: Span,
- expected_ret: Expectation<'tcx>,
- formal_ret: ty::FnOutput<'tcx>,
- formal_args: &[Ty<'tcx>])
- -> Vec<Ty<'tcx>> {
- let expected_args = expected_ret.only_has_type(fcx).and_then(|ret_ty| {
- if let ty::FnConverging(formal_ret_ty) = formal_ret {
- fcx.infcx().commit_regions_if_ok(|| {
- // Attempt to apply a subtyping relationship between the formal
- // return type (likely containing type variables if the function
- // is polymorphic) and the expected return type.
- // No argument expectations are produced if unification fails.
- let origin = TypeOrigin::Misc(call_span);
- let ures = fcx.infcx().sub_types(false, origin, formal_ret_ty, ret_ty);
- // FIXME(#15760) can't use try! here, FromError doesn't default
- // to identity so the resulting type is not constrained.
- match ures {
- // FIXME(#32730) propagate obligations
- Ok(InferOk { obligations, .. }) => assert!(obligations.is_empty()),
- Err(e) => return Err(e),
- }
+ // AST fragment checking
+ fn check_lit(&self,
+ lit: &ast::Lit,
+ expected: Expectation<'tcx>)
+ -> Ty<'tcx>
+ {
+ let tcx = self.tcx;
- // Record all the argument types, with the substitutions
- // produced from the above subtyping unification.
- Ok(formal_args.iter().map(|ty| {
- fcx.infcx().resolve_type_vars_if_possible(ty)
- }).collect())
- }).ok()
- } else {
- None
- }
- }).unwrap_or(vec![]);
- debug!("expected_types_for_fn_args(formal={:?} -> {:?}, expected={:?} -> {:?})",
- formal_args, formal_ret,
- expected_args, expected_ret);
- expected_args
-}
+ match lit.node {
+ ast::LitKind::Str(..) => tcx.mk_static_str(),
+ ast::LitKind::ByteStr(ref v) => {
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_array(tcx.types.u8, v.len()))
+ }
+ ast::LitKind::Byte(_) => tcx.types.u8,
+ ast::LitKind::Char(_) => tcx.types.char,
+ ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
+ ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
+ ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| {
+ match ty.sty {
+ ty::TyInt(_) | ty::TyUint(_) => Some(ty),
+ ty::TyChar => Some(tcx.types.u8),
+ ty::TyRawPtr(..) => Some(tcx.types.usize),
+ ty::TyFnDef(..) | ty::TyFnPtr(_) => Some(tcx.types.usize),
+ _ => None
+ }
+ });
+ opt_ty.unwrap_or_else(
+ || tcx.mk_int_var(self.next_int_var_id()))
+ }
+ ast::LitKind::Float(_, t) => tcx.mk_mach_float(t),
+ ast::LitKind::FloatUnsuffixed(_) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| {
+ match ty.sty {
+ ty::TyFloat(_) => Some(ty),
+ _ => None
+ }
+ });
+ opt_ty.unwrap_or_else(
+ || tcx.mk_float_var(self.next_float_var_id()))
+ }
+ ast::LitKind::Bool(_) => tcx.types.bool
+ }
+ }
+
+ fn check_expr_eq_type(&self,
+ expr: &'gcx hir::Expr,
+ expected: Ty<'tcx>) {
+ self.check_expr_with_hint(expr, expected);
+ self.demand_eqtype(expr.span, expected, self.expr_ty(expr));
+ }
+
+ pub fn check_expr_has_type(&self,
+ expr: &'gcx hir::Expr,
+ expected: Ty<'tcx>) {
+ self.check_expr_with_hint(expr, expected);
+ self.demand_suptype(expr.span, expected, self.expr_ty(expr));
+ }
+
+ fn check_expr_coercable_to_type(&self,
+ expr: &'gcx hir::Expr,
+ expected: Ty<'tcx>) {
+ self.check_expr_with_hint(expr, expected);
+ self.demand_coerce(expr, expected);
+ }
+
+ fn check_expr_with_hint(&self, expr: &'gcx hir::Expr,
+ expected: Ty<'tcx>) {
+ self.check_expr_with_expectation(expr, ExpectHasType(expected))
+ }
+
+ fn check_expr_with_expectation(&self,
+ expr: &'gcx hir::Expr,
+ expected: Expectation<'tcx>) {
+ self.check_expr_with_expectation_and_lvalue_pref(expr, expected, NoPreference)
+ }
+
+ fn check_expr(&self, expr: &'gcx hir::Expr) {
+ self.check_expr_with_expectation(expr, NoExpectation)
+ }
+
+ fn check_expr_with_lvalue_pref(&self, expr: &'gcx hir::Expr,
+ lvalue_pref: LvaluePreference) {
+ self.check_expr_with_expectation_and_lvalue_pref(expr, NoExpectation, lvalue_pref)
+ }
+
+ // determine the `self` type, using fresh variables for all variables
+ // declared on the impl declaration e.g., `impl<A,B> for Vec<(A,B)>`
+ // would return ($0, $1) where $0 and $1 are freshly instantiated type
+ // variables.
+ pub fn impl_self_ty(&self,
+ span: Span, // (potential) receiver for this impl
+ did: DefId)
+ -> TypeAndSubsts<'tcx> {
+ let tcx = self.tcx;
+
+ let ity = tcx.lookup_item_type(did);
+ let (tps, rps, raw_ty) =
+ (ity.generics.types.get_slice(subst::TypeSpace),
+ ity.generics.regions.get_slice(subst::TypeSpace),
+ ity.ty);
+
+ debug!("impl_self_ty: tps={:?} rps={:?} raw_ty={:?}", tps, rps, raw_ty);
+
+ let rps = self.region_vars_for_defs(span, rps);
+ let mut substs = subst::Substs::new(
+ VecPerParamSpace::empty(),
+ VecPerParamSpace::new(rps, Vec::new(), Vec::new()));
+ self.type_vars_for_defs(span, ParamSpace::TypeSpace, &mut substs, tps);
+ let substd_ty = self.instantiate_type_scheme(span, &substs, &raw_ty);
+
+ TypeAndSubsts { substs: substs, ty: substd_ty }
+ }
+
+ /// Unifies the return type with the expected type early, for more coercions
+ /// and forward type information on the argument expressions.
+ fn expected_types_for_fn_args(&self,
+ call_span: Span,
+ expected_ret: Expectation<'tcx>,
+ formal_ret: ty::FnOutput<'tcx>,
+ formal_args: &[Ty<'tcx>])
+ -> Vec<Ty<'tcx>> {
+ let expected_args = expected_ret.only_has_type(self).and_then(|ret_ty| {
+ if let ty::FnConverging(formal_ret_ty) = formal_ret {
+ self.commit_regions_if_ok(|| {
+ // Attempt to apply a subtyping relationship between the formal
+ // return type (likely containing type variables if the function
+ // is polymorphic) and the expected return type.
+ // No argument expectations are produced if unification fails.
+ let origin = TypeOrigin::Misc(call_span);
+ let ures = self.sub_types(false, origin, formal_ret_ty, ret_ty);
+ // FIXME(#15760) can't use try! here, FromError doesn't default
+ // to identity so the resulting type is not constrained.
+ match ures {
+ // FIXME(#32730) propagate obligations
+ Ok(InferOk { obligations, .. }) => assert!(obligations.is_empty()),
+ Err(e) => return Err(e),
+ }
-/// Invariant:
-/// If an expression has any sub-expressions that result in a type error,
-/// inspecting that expression's type with `ty.references_error()` will return
-/// true. Likewise, if an expression is known to diverge, inspecting its
-/// type with `ty::type_is_bot` will return true (n.b.: since Rust is
-/// strict, _|_ can appear in the type of an expression that does not,
-/// itself, diverge: for example, fn() -> _|_.)
-/// Note that inspecting a type's structure *directly* may expose the fact
-/// that there are actually multiple representations for `TyError`, so avoid
-/// that when err needs to be handled differently.
-fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- expected: Expectation<'tcx>,
- lvalue_pref: LvaluePreference) {
- debug!(">> typechecking: expr={:?} expected={:?}",
- expr, expected);
+ // Record all the argument types, with the substitutions
+ // produced from the above subtyping unification.
+ Ok(formal_args.iter().map(|ty| {
+ self.resolve_type_vars_if_possible(ty)
+ }).collect())
+ }).ok()
+ } else {
+ None
+ }
+ }).unwrap_or(vec![]);
+ debug!("expected_types_for_fn_args(formal={:?} -> {:?}, expected={:?} -> {:?})",
+ formal_args, formal_ret,
+ expected_args, expected_ret);
+ expected_args
+ }
// Checks a method call.
- fn check_method_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- method_name: Spanned<ast::Name>,
- args: &'tcx [P<hir::Expr>],
- tps: &[P<hir::Ty>],
- expected: Expectation<'tcx>,
- lvalue_pref: LvaluePreference) {
+ fn check_method_call(&self,
+ expr: &'gcx hir::Expr,
+ method_name: Spanned<ast::Name>,
+ args: &'gcx [P<hir::Expr>],
+ tps: &[P<hir::Ty>],
+ expected: Expectation<'tcx>,
+ lvalue_pref: LvaluePreference) {
let rcvr = &args[0];
- check_expr_with_lvalue_pref(fcx, &rcvr, lvalue_pref);
+ self.check_expr_with_lvalue_pref(&rcvr, lvalue_pref);
// no need to check for bot/err -- callee does that
- let expr_t = structurally_resolved_type(fcx,
- expr.span,
- fcx.expr_ty(&rcvr));
-
- let tps = tps.iter().map(|ast_ty| fcx.to_ty(&ast_ty)).collect::<Vec<_>>();
- let fn_ty = match method::lookup(fcx,
- method_name.span,
- method_name.node,
- expr_t,
- tps,
- expr,
- rcvr) {
+ let expr_t = self.structurally_resolved_type(expr.span, self.expr_ty(&rcvr));
+
+ let tps = tps.iter().map(|ast_ty| self.to_ty(&ast_ty)).collect::<Vec<_>>();
+ let fn_ty = match self.lookup_method(method_name.span,
+ method_name.node,
+ expr_t,
+ tps,
+ expr,
+ rcvr) {
Ok(method) => {
let method_ty = method.ty;
let method_call = MethodCall::expr(expr.id);
- fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
+ self.tables.borrow_mut().method_map.insert(method_call, method);
method_ty
}
Err(error) => {
if method_name.node != keywords::Invalid.name() {
- method::report_error(fcx, method_name.span, expr_t,
- method_name.node, Some(rcvr), error);
+ self.report_method_error(method_name.span, expr_t,
+ method_name.node, Some(rcvr), error);
}
- fcx.write_error(expr.id);
- fcx.tcx().types.err
+ self.write_error(expr.id);
+ self.tcx.types.err
}
};
// Call the generic checker.
- let ret_ty = check_method_argument_types(fcx,
- method_name.span,
- fn_ty,
- expr,
- &args[1..],
- DontTupleArguments,
- expected);
+ let ret_ty = self.check_method_argument_types(method_name.span, fn_ty,
+ expr, &args[1..],
+ DontTupleArguments,
+ expected);
- write_call(fcx, expr, ret_ty);
+ self.write_call(expr, ret_ty);
}
// A generic function for checking the then and else in an if
// or if-else.
- fn check_then_else<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- cond_expr: &'tcx hir::Expr,
- then_blk: &'tcx hir::Block,
- opt_else_expr: Option<&'tcx hir::Expr>,
- id: ast::NodeId,
- sp: Span,
- expected: Expectation<'tcx>) {
- check_expr_has_type(fcx, cond_expr, fcx.tcx().types.bool);
-
- let expected = expected.adjust_for_branches(fcx);
- check_block_with_expected(fcx, then_blk, expected);
- let then_ty = fcx.node_ty(then_blk.id);
-
- let unit = fcx.tcx().mk_nil();
+ fn check_then_else(&self,
+ cond_expr: &'gcx hir::Expr,
+ then_blk: &'gcx hir::Block,
+ opt_else_expr: Option<&'gcx hir::Expr>,
+ id: ast::NodeId,
+ sp: Span,
+ expected: Expectation<'tcx>) {
+ self.check_expr_has_type(cond_expr, self.tcx.types.bool);
+
+ let expected = expected.adjust_for_branches(self);
+ self.check_block_with_expected(then_blk, expected);
+ let then_ty = self.node_ty(then_blk.id);
+
+ let unit = self.tcx.mk_nil();
let (origin, expected, found, result) =
if let Some(else_expr) = opt_else_expr {
- check_expr_with_expectation(fcx, else_expr, expected);
- let else_ty = fcx.expr_ty(else_expr);
+ self.check_expr_with_expectation(else_expr, expected);
+ let else_ty = self.expr_ty(else_expr);
let origin = TypeOrigin::IfExpression(sp);
// Only try to coerce-unify if we have a then expression
// to assign coercions to, otherwise it's () or diverging.
let result = if let Some(ref then) = then_blk.expr {
- let res = coercion::try_find_lub(fcx, origin, || Some(&**then),
- then_ty, else_expr);
+ let res = self.try_find_coercion_lub(origin, || Some(&**then),
+ then_ty, else_expr);
// In case we did perform an adjustment, we have to update
// the type of the block, because old trans still uses it.
- let adj = fcx.inh.tables.borrow().adjustments.get(&then.id).cloned();
+ let adj = self.tables.borrow().adjustments.get(&then.id).cloned();
if res.is_ok() && adj.is_some() {
- fcx.write_ty(then_blk.id, fcx.adjust_expr_ty(then, adj.as_ref()));
+ self.write_ty(then_blk.id, self.adjust_expr_ty(then, adj.as_ref()));
}
res
} else {
- fcx.infcx().commit_if_ok(|_| {
+ self.commit_if_ok(|_| {
let trace = TypeTrace::types(origin, true, then_ty, else_ty);
- fcx.infcx().lub(true, trace, &then_ty, &else_ty)
+ self.lub(true, trace, &then_ty, &else_ty)
.map(|InferOk { value, obligations }| {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
} else {
let origin = TypeOrigin::IfExpressionWithNoElse(sp);
(origin, unit, then_ty,
- fcx.infcx().eq_types(true, origin, unit, then_ty)
+ self.eq_types(true, origin, unit, then_ty)
.map(|InferOk { obligations, .. }| {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
let if_ty = match result {
Ok(ty) => {
- if fcx.expr_ty(cond_expr).references_error() {
- fcx.tcx().types.err
+ if self.expr_ty(cond_expr).references_error() {
+ self.tcx.types.err
} else {
ty
}
}
Err(e) => {
- fcx.infcx().report_mismatched_types(origin, expected, found, e);
- fcx.tcx().types.err
+ self.report_mismatched_types(origin, expected, found, e);
+ self.tcx.types.err
}
};
- fcx.write_ty(id, if_ty);
+ self.write_ty(id, if_ty);
}
// Check field access expressions
- fn check_field<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &'tcx hir::Expr,
- lvalue_pref: LvaluePreference,
- base: &'tcx hir::Expr,
- field: &Spanned<ast::Name>) {
- check_expr_with_lvalue_pref(fcx, base, lvalue_pref);
- let expr_t = structurally_resolved_type(fcx, expr.span, fcx.expr_ty(base));
+ fn check_field(&self,
+ expr: &'gcx hir::Expr,
+ lvalue_pref: LvaluePreference,
+ base: &'gcx hir::Expr,
+ field: &Spanned<ast::Name>) {
+ self.check_expr_with_lvalue_pref(base, lvalue_pref);
+ let expr_t = self.structurally_resolved_type(expr.span,
+ self.expr_ty(base));
let mut private_candidate = None;
- let (_, autoderefs, field_ty) = autoderef(fcx,
- expr.span,
- expr_t,
- || Some(base),
- UnresolvedTypeAction::Error,
- lvalue_pref,
- |base_t, _| {
+ let (_, autoderefs, field_ty) = self.autoderef(expr.span,
+ expr_t,
+ || Some(base),
+ UnresolvedTypeAction::Error,
+ lvalue_pref,
+ |base_t, _| {
if let ty::TyStruct(base_def, substs) = base_t.sty {
debug!("struct named {:?}", base_t);
if let Some(field) = base_def.struct_variant().find_field_named(field.node) {
- let field_ty = fcx.field_ty(expr.span, field, substs);
- if field.vis.is_accessible_from(fcx.body_id, &fcx.tcx().map) {
+ let field_ty = self.field_ty(expr.span, field, substs);
+ if field.vis.is_accessible_from(self.body_id, &self.tcx().map) {
return Some(field_ty);
}
private_candidate = Some((base_def.did, field_ty));
});
match field_ty {
Some(field_ty) => {
- fcx.write_ty(expr.id, field_ty);
- fcx.write_autoderef_adjustment(base.id, autoderefs);
+ self.write_ty(expr.id, field_ty);
+ self.write_autoderef_adjustment(base.id, autoderefs);
return;
}
None => {}
}
if let Some((did, field_ty)) = private_candidate {
- let struct_path = fcx.tcx().item_path_str(did);
+ let struct_path = self.tcx().item_path_str(did);
let msg = format!("field `{}` of struct `{}` is private", field.node, struct_path);
- fcx.tcx().sess.span_err(expr.span, &msg);
- fcx.write_ty(expr.id, field_ty);
+ self.tcx().sess.span_err(expr.span, &msg);
+ self.write_ty(expr.id, field_ty);
} else if field.node == keywords::Invalid.name() {
- fcx.write_error(expr.id);
- } else if method::exists(fcx, field.span, field.node, expr_t, expr.id) {
- fcx.type_error_struct(field.span,
- |actual| {
- format!("attempted to take value of method `{}` on type \
- `{}`", field.node, actual)
- },
- expr_t, None)
+ self.write_error(expr.id);
+ } else if self.method_exists(field.span, field.node, expr_t, expr.id) {
+ self.type_error_struct(field.span, |actual| {
+ format!("attempted to take value of method `{}` on type \
+ `{}`", field.node, actual)
+ }, expr_t, None)
.help(
"maybe a `()` to call it is missing? \
If not, try an anonymous function")
.emit();
- fcx.write_error(expr.id);
+ self.write_error(expr.id);
} else {
- let mut err = fcx.type_error_struct(
- expr.span,
- |actual| {
- format!("attempted access of field `{}` on \
- type `{}`, but no field with that \
- name was found",
- field.node,
- actual)
- },
- expr_t, None);
+ let mut err = self.type_error_struct(expr.span, |actual| {
+ format!("attempted access of field `{}` on type `{}`, \
+ but no field with that name was found",
+ field.node, actual)
+ }, expr_t, None);
if let ty::TyStruct(def, _) = expr_t.sty {
- suggest_field_names(&mut err, def.struct_variant(), field, vec![]);
+ Self::suggest_field_names(&mut err, def.struct_variant(), field, vec![]);
}
err.emit();
- fcx.write_error(expr.id);
+ self.write_error(expr.id);
}
}
// displays hints about the closest matches in field names
- fn suggest_field_names<'tcx>(err: &mut DiagnosticBuilder,
- variant: ty::VariantDef<'tcx>,
- field: &Spanned<ast::Name>,
- skip : Vec<InternedString>) {
+ fn suggest_field_names(err: &mut DiagnosticBuilder,
+ variant: ty::VariantDef<'tcx>,
+ field: &Spanned<ast::Name>,
+ skip : Vec<InternedString>) {
let name = field.node.as_str();
- let names = variant.fields
- .iter()
- .filter_map(|ref field| {
- // ignore already set fields and private fields from non-local crates
- if skip.iter().any(|x| *x == field.name.as_str()) ||
- (variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) {
- None
- } else {
- Some(&field.name)
- }
- });
+ let names = variant.fields.iter().filter_map(|field| {
+ // ignore already set fields and private fields from non-local crates
+ if skip.iter().any(|x| *x == field.name.as_str()) ||
+ (variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) {
+ None
+ } else {
+ Some(&field.name)
+ }
+ });
// only find fits with at least one matching letter
if let Some(name) = find_best_match_for_name(names, &name, Some(name.len())) {
}
// Check tuple index expressions
- fn check_tup_field<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &'tcx hir::Expr,
- lvalue_pref: LvaluePreference,
- base: &'tcx hir::Expr,
- idx: codemap::Spanned<usize>) {
- check_expr_with_lvalue_pref(fcx, base, lvalue_pref);
- let expr_t = structurally_resolved_type(fcx, expr.span, fcx.expr_ty(base));
+ fn check_tup_field(&self,
+ expr: &'gcx hir::Expr,
+ lvalue_pref: LvaluePreference,
+ base: &'gcx hir::Expr,
+ idx: codemap::Spanned<usize>) {
+ self.check_expr_with_lvalue_pref(base, lvalue_pref);
+ let expr_t = self.structurally_resolved_type(expr.span,
+ self.expr_ty(base));
let mut private_candidate = None;
let mut tuple_like = false;
- let (_, autoderefs, field_ty) = autoderef(fcx,
- expr.span,
- expr_t,
- || Some(base),
- UnresolvedTypeAction::Error,
- lvalue_pref,
- |base_t, _| {
+ let (_, autoderefs, field_ty) = self.autoderef(expr.span,
+ expr_t,
+ || Some(base),
+ UnresolvedTypeAction::Error,
+ lvalue_pref,
+ |base_t, _| {
let (base_def, substs) = match base_t.sty {
ty::TyStruct(base_def, substs) => (base_def, substs),
ty::TyTuple(ref v) => {
debug!("tuple struct named {:?}", base_t);
if let Some(field) = base_def.struct_variant().fields.get(idx.node) {
- let field_ty = fcx.field_ty(expr.span, field, substs);
- if field.vis.is_accessible_from(fcx.body_id, &fcx.tcx().map) {
+ let field_ty = self.field_ty(expr.span, field, substs);
+ if field.vis.is_accessible_from(self.body_id, &self.tcx().map) {
return Some(field_ty);
}
private_candidate = Some((base_def.did, field_ty));
});
match field_ty {
Some(field_ty) => {
- fcx.write_ty(expr.id, field_ty);
- fcx.write_autoderef_adjustment(base.id, autoderefs);
+ self.write_ty(expr.id, field_ty);
+ self.write_autoderef_adjustment(base.id, autoderefs);
return;
}
None => {}
}
if let Some((did, field_ty)) = private_candidate {
- let struct_path = fcx.tcx().item_path_str(did);
+ let struct_path = self.tcx().item_path_str(did);
let msg = format!("field `{}` of struct `{}` is private", idx.node, struct_path);
- fcx.tcx().sess.span_err(expr.span, &msg);
- fcx.write_ty(expr.id, field_ty);
+ self.tcx().sess.span_err(expr.span, &msg);
+ self.write_ty(expr.id, field_ty);
return;
}
- fcx.type_error_message(
+ self.type_error_message(
expr.span,
|actual| {
if tuple_like {
},
expr_t, None);
- fcx.write_error(expr.id);
+ self.write_error(expr.id);
}
- fn report_unknown_field<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- ty: Ty<'tcx>,
- variant: ty::VariantDef<'tcx>,
- field: &hir::Field,
- skip_fields: &[hir::Field]) {
- let mut err = fcx.type_error_struct(
+ fn report_unknown_field(&self,
+ ty: Ty<'tcx>,
+ variant: ty::VariantDef<'tcx>,
+ field: &hir::Field,
+ skip_fields: &[hir::Field]) {
+ let mut err = self.type_error_struct(
field.name.span,
|actual| if let ty::TyEnum(..) = ty.sty {
format!("struct variant `{}::{}` has no field named `{}`",
None);
// prevent all specified fields from being suggested
let skip_fields = skip_fields.iter().map(|ref x| x.name.node.as_str());
- suggest_field_names(&mut err, variant, &field.name, skip_fields.collect());
+ Self::suggest_field_names(&mut err, variant, &field.name, skip_fields.collect());
err.emit();
}
- fn check_expr_struct_fields<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- adt_ty: Ty<'tcx>,
- span: Span,
- variant: ty::VariantDef<'tcx>,
- ast_fields: &'tcx [hir::Field],
- check_completeness: bool) {
- let tcx = fcx.ccx.tcx;
+ fn check_expr_struct_fields(&self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ variant: ty::VariantDef<'tcx>,
+ ast_fields: &'gcx [hir::Field],
+ check_completeness: bool) {
+ let tcx = self.tcx;
let substs = match adt_ty.sty {
ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs,
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
let expected_field_type;
if let Some(v_field) = remaining_fields.remove(&field.name.node) {
- expected_field_type = fcx.field_ty(field.span, v_field, substs);
+ expected_field_type = self.field_ty(field.span, v_field, substs);
} else {
error_happened = true;
expected_field_type = tcx.types.err;
if let Some(_) = variant.find_field_named(field.name.node) {
- span_err!(fcx.tcx().sess, field.name.span, E0062,
+ span_err!(self.tcx.sess, field.name.span, E0062,
"field `{}` specified more than once",
field.name.node);
} else {
- report_unknown_field(fcx, adt_ty, variant, field, ast_fields);
+ self.report_unknown_field(adt_ty, variant, field, ast_fields);
}
}
// Make sure to give a type to the field even if there's
// an error, so we can continue typechecking
- check_expr_coercable_to_type(fcx, &field.expr, expected_field_type);
+ self.check_expr_coercable_to_type(&field.expr, expected_field_type);
}
// Make sure the programmer specified all the fields.
}
- fn check_struct_fields_on_error<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- id: ast::NodeId,
- fields: &'tcx [hir::Field],
- base_expr: &'tcx Option<P<hir::Expr>>) {
+ fn check_struct_fields_on_error(&self,
+ id: ast::NodeId,
+ fields: &'gcx [hir::Field],
+ base_expr: &'gcx Option<P<hir::Expr>>) {
// Make sure to still write the types
// otherwise we might ICE
- fcx.write_error(id);
+ self.write_error(id);
for field in fields {
- check_expr(fcx, &field.expr);
+ self.check_expr(&field.expr);
}
match *base_expr {
- Some(ref base) => check_expr(fcx, &base),
+ Some(ref base) => self.check_expr(&base),
None => {}
}
}
- fn check_expr_struct<'a, 'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &hir::Expr,
- path: &hir::Path,
- fields: &'tcx [hir::Field],
- base_expr: &'tcx Option<P<hir::Expr>>)
+ fn check_expr_struct(&self,
+ expr: &hir::Expr,
+ path: &hir::Path,
+ fields: &'gcx [hir::Field],
+ base_expr: &'gcx Option<P<hir::Expr>>)
{
- let tcx = fcx.tcx();
+ let tcx = self.tcx;
// Find the relevant variant
let def = lookup_full_def(tcx, path.span, expr.id);
if def == Def::Err {
- fcx.infcx().set_tainted_by_errors();
- check_struct_fields_on_error(fcx, expr.id, fields, base_expr);
+ self.set_tainted_by_errors();
+ self.check_struct_fields_on_error(expr.id, fields, base_expr);
return;
}
- let variant = match fcx.def_struct_variant(def, path.span) {
+ let variant = match self.def_struct_variant(def, path.span) {
Some((_, variant)) => variant,
None => {
- span_err!(fcx.tcx().sess, path.span, E0071,
+ span_err!(self.tcx.sess, path.span, E0071,
"`{}` does not name a structure",
pprust::path_to_string(path));
- check_struct_fields_on_error(fcx, expr.id, fields, base_expr);
+ self.check_struct_fields_on_error(expr.id, fields, base_expr);
return;
}
};
- let expr_ty = fcx.instantiate_type(def.def_id(), path);
- fcx.write_ty(expr.id, expr_ty);
+ let expr_ty = self.instantiate_type(def.def_id(), path);
+ self.write_ty(expr.id, expr_ty);
- check_expr_struct_fields(fcx, expr_ty, expr.span, variant, fields,
- base_expr.is_none());
+ self.check_expr_struct_fields(expr_ty, expr.span, variant, fields,
+ base_expr.is_none());
if let &Some(ref base_expr) = base_expr {
- check_expr_has_type(fcx, base_expr, expr_ty);
+ self.check_expr_has_type(base_expr, expr_ty);
match expr_ty.sty {
ty::TyStruct(adt, substs) => {
- fcx.inh.tables.borrow_mut().fru_field_types.insert(
+ self.tables.borrow_mut().fru_field_types.insert(
expr.id,
adt.struct_variant().fields.iter().map(|f| {
- fcx.normalize_associated_types_in(
+ self.normalize_associated_types_in(
expr.span, &f.ty(tcx, substs)
)
}).collect()
}
}
- type ExprCheckerWithTy = fn(&FnCtxt, &hir::Expr, Ty);
-
- let tcx = fcx.ccx.tcx;
- let id = expr.id;
- match expr.node {
- hir::ExprBox(ref subexpr) => {
- let expected_inner = expected.to_option(fcx).map_or(NoExpectation, |ty| {
- match ty.sty {
- ty::TyBox(ty) => Expectation::rvalue_hint(tcx, ty),
- _ => NoExpectation
- }
- });
- check_expr_with_expectation(fcx, subexpr, expected_inner);
- let referent_ty = fcx.expr_ty(&subexpr);
- fcx.write_ty(id, tcx.mk_box(referent_ty));
- }
-
- hir::ExprLit(ref lit) => {
- let typ = check_lit(fcx, &lit, expected);
- fcx.write_ty(id, typ);
- }
- hir::ExprBinary(op, ref lhs, ref rhs) => {
- op::check_binop(fcx, expr, op, lhs, rhs);
- }
- hir::ExprAssignOp(op, ref lhs, ref rhs) => {
- op::check_binop_assign(fcx, expr, op, lhs, rhs);
- }
- hir::ExprUnary(unop, ref oprnd) => {
- let expected_inner = match unop {
- hir::UnNot | hir::UnNeg => {
- expected
- }
- hir::UnDeref => {
- NoExpectation
- }
- };
- let lvalue_pref = match unop {
- hir::UnDeref => lvalue_pref,
- _ => NoPreference
- };
- check_expr_with_expectation_and_lvalue_pref(
- fcx, &oprnd, expected_inner, lvalue_pref);
- let mut oprnd_t = fcx.expr_ty(&oprnd);
- if !oprnd_t.references_error() {
- match unop {
- hir::UnDeref => {
- oprnd_t = structurally_resolved_type(fcx, expr.span, oprnd_t);
-
- if let Some(mt) = oprnd_t.builtin_deref(true, NoPreference) {
- oprnd_t = mt.ty;
- } else if let Some(method) = try_overloaded_deref(
- fcx, expr.span, Some(&oprnd), oprnd_t, lvalue_pref) {
- oprnd_t = make_overloaded_lvalue_return_type(tcx, method).ty;
- fcx.inh.tables.borrow_mut().method_map.insert(MethodCall::expr(expr.id),
- method);
- } else {
- fcx.type_error_message(expr.span, |actual| {
- format!("type `{}` cannot be \
- dereferenced", actual)
- }, oprnd_t, None);
- oprnd_t = tcx.types.err;
- }
+ /// Invariant:
+ /// If an expression has any sub-expressions that result in a type error,
+ /// inspecting that expression's type with `ty.references_error()` will return
+ /// true. Likewise, if an expression is known to diverge, inspecting its
+ /// type with `ty::type_is_bot` will return true (n.b.: since Rust is
+ /// strict, _|_ can appear in the type of an expression that does not,
+ /// itself, diverge: for example, fn() -> _|_.)
+ /// Note that inspecting a type's structure *directly* may expose the fact
+ /// that there are actually multiple representations for `TyError`, so avoid
+ /// that when err needs to be handled differently.
+ fn check_expr_with_expectation_and_lvalue_pref(&self,
+ expr: &'gcx hir::Expr,
+ expected: Expectation<'tcx>,
+ lvalue_pref: LvaluePreference) {
+ debug!(">> typechecking: expr={:?} expected={:?}",
+ expr, expected);
+
+ let tcx = self.tcx;
+ let id = expr.id;
+ match expr.node {
+ hir::ExprBox(ref subexpr) => {
+ let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| {
+ match ty.sty {
+ ty::TyBox(ty) => Expectation::rvalue_hint(self, ty),
+ _ => NoExpectation
}
- hir::UnNot => {
- oprnd_t = structurally_resolved_type(fcx, oprnd.span,
- oprnd_t);
- if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
- oprnd_t = op::check_user_unop(fcx, "!", "not",
- tcx.lang_items.not_trait(),
- expr, &oprnd, oprnd_t, unop);
- }
+ });
+ self.check_expr_with_expectation(subexpr, expected_inner);
+ let referent_ty = self.expr_ty(&subexpr);
+ self.write_ty(id, tcx.mk_box(referent_ty));
+ }
+
+ hir::ExprLit(ref lit) => {
+ let typ = self.check_lit(&lit, expected);
+ self.write_ty(id, typ);
+ }
+ hir::ExprBinary(op, ref lhs, ref rhs) => {
+ self.check_binop(expr, op, lhs, rhs);
+ }
+ hir::ExprAssignOp(op, ref lhs, ref rhs) => {
+ self.check_binop_assign(expr, op, lhs, rhs);
+ }
+ hir::ExprUnary(unop, ref oprnd) => {
+ let expected_inner = match unop {
+ hir::UnNot | hir::UnNeg => {
+ expected
}
- hir::UnNeg => {
- oprnd_t = structurally_resolved_type(fcx, oprnd.span,
- oprnd_t);
- if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
- oprnd_t = op::check_user_unop(fcx, "-", "neg",
- tcx.lang_items.neg_trait(),
- expr, &oprnd, oprnd_t, unop);
- }
+ hir::UnDeref => {
+ NoExpectation
}
- }
- }
- fcx.write_ty(id, oprnd_t);
- }
- hir::ExprAddrOf(mutbl, ref oprnd) => {
- let hint = expected.only_has_type(fcx).map_or(NoExpectation, |ty| {
- match ty.sty {
- ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => {
- if fcx.tcx().expr_is_lval(&oprnd) {
- // Lvalues may legitimately have unsized types.
- // For example, dereferences of a fat pointer and
- // the last field of a struct can be unsized.
- ExpectHasType(mt.ty)
- } else {
- Expectation::rvalue_hint(tcx, mt.ty)
+ };
+ let lvalue_pref = match unop {
+ hir::UnDeref => lvalue_pref,
+ _ => NoPreference
+ };
+ self.check_expr_with_expectation_and_lvalue_pref(&oprnd,
+ expected_inner,
+ lvalue_pref);
+ let mut oprnd_t = self.expr_ty(&oprnd);
+
+ if !oprnd_t.references_error() {
+ match unop {
+ hir::UnDeref => {
+ oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
+
+ if let Some(mt) = oprnd_t.builtin_deref(true, NoPreference) {
+ oprnd_t = mt.ty;
+ } else if let Some(method) = self.try_overloaded_deref(
+ expr.span, Some(&oprnd), oprnd_t, lvalue_pref) {
+ oprnd_t = self.make_overloaded_lvalue_return_type(method).ty;
+ self.tables.borrow_mut().method_map.insert(MethodCall::expr(expr.id),
+ method);
+ } else {
+ self.type_error_message(expr.span, |actual| {
+ format!("type `{}` cannot be \
+ dereferenced", actual)
+ }, oprnd_t, None);
+ oprnd_t = tcx.types.err;
+ }
+ }
+ hir::UnNot => {
+ oprnd_t = self.structurally_resolved_type(oprnd.span,
+ oprnd_t);
+ if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
+ oprnd_t = self.check_user_unop("!", "not",
+ tcx.lang_items.not_trait(),
+ expr, &oprnd, oprnd_t, unop);
+ }
+ }
+ hir::UnNeg => {
+ oprnd_t = self.structurally_resolved_type(oprnd.span,
+ oprnd_t);
+ if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
+ oprnd_t = self.check_user_unop("-", "neg",
+ tcx.lang_items.neg_trait(),
+ expr, &oprnd, oprnd_t, unop);
+ }
}
}
- _ => NoExpectation
}
- });
- let lvalue_pref = LvaluePreference::from_mutbl(mutbl);
- check_expr_with_expectation_and_lvalue_pref(fcx,
- &oprnd,
- hint,
- lvalue_pref);
-
- let tm = ty::TypeAndMut { ty: fcx.expr_ty(&oprnd), mutbl: mutbl };
- let oprnd_t = if tm.ty.references_error() {
- tcx.types.err
- } else {
- // Note: at this point, we cannot say what the best lifetime
- // is to use for resulting pointer. We want to use the
- // shortest lifetime possible so as to avoid spurious borrowck
- // errors. Moreover, the longest lifetime will depend on the
- // precise details of the value whose address is being taken
- // (and how long it is valid), which we don't know yet until type
- // inference is complete.
- //
- // Therefore, here we simply generate a region variable. The
- // region inferencer will then select the ultimate value.
- // Finally, borrowck is charged with guaranteeing that the
- // value whose address was taken can actually be made to live
- // as long as it needs to live.
- let region = fcx.infcx().next_region_var(infer::AddrOfRegion(expr.span));
- tcx.mk_ref(tcx.mk_region(region), tm)
- };
- fcx.write_ty(id, oprnd_t);
- }
- hir::ExprPath(ref maybe_qself, ref path) => {
- let opt_self_ty = maybe_qself.as_ref().map(|qself| {
- fcx.to_ty(&qself.ty)
- });
-
- let path_res = if let Some(&d) = tcx.def_map.borrow().get(&id) {
- d
- } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself {
- // Create some fake resolution that can't possibly be a type.
- def::PathResolution {
- base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
- depth: path.segments.len()
+ self.write_ty(id, oprnd_t);
+ }
+ hir::ExprAddrOf(mutbl, ref oprnd) => {
+ let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
+ match ty.sty {
+ ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => {
+ if self.tcx.expr_is_lval(&oprnd) {
+ // Lvalues may legitimately have unsized types.
+ // For example, dereferences of a fat pointer and
+ // the last field of a struct can be unsized.
+ ExpectHasType(mt.ty)
+ } else {
+ Expectation::rvalue_hint(self, mt.ty)
+ }
+ }
+ _ => NoExpectation
}
+ });
+ let lvalue_pref = LvaluePreference::from_mutbl(mutbl);
+ self.check_expr_with_expectation_and_lvalue_pref(&oprnd, hint, lvalue_pref);
+
+ let tm = ty::TypeAndMut { ty: self.expr_ty(&oprnd), mutbl: mutbl };
+ let oprnd_t = if tm.ty.references_error() {
+ tcx.types.err
} else {
- span_bug!(expr.span, "unbound path {:?}", expr)
- };
-
- if let Some((opt_ty, segments, def)) =
- resolve_ty_and_def_ufcs(fcx, path_res, opt_self_ty, path,
- expr.span, expr.id) {
- if def != Def::Err {
- let (scheme, predicates) = type_scheme_and_predicates_for_def(fcx,
- expr.span,
- def);
- instantiate_path(fcx,
- segments,
- scheme,
- &predicates,
- opt_ty,
- def,
- expr.span,
- id);
- } else {
- fcx.infcx().set_tainted_by_errors();
- fcx.write_ty(id, fcx.tcx().types.err);
- }
+ // Note: at this point, we cannot say what the best lifetime
+ // is to use for resulting pointer. We want to use the
+ // shortest lifetime possible so as to avoid spurious borrowck
+ // errors. Moreover, the longest lifetime will depend on the
+ // precise details of the value whose address is being taken
+ // (and how long it is valid), which we don't know yet until type
+ // inference is complete.
+ //
+ // Therefore, here we simply generate a region variable. The
+ // region inferencer will then select the ultimate value.
+ // Finally, borrowck is charged with guaranteeing that the
+ // value whose address was taken can actually be made to live
+ // as long as it needs to live.
+ let region = self.next_region_var(infer::AddrOfRegion(expr.span));
+ tcx.mk_ref(tcx.mk_region(region), tm)
+ };
+ self.write_ty(id, oprnd_t);
}
+ hir::ExprPath(ref maybe_qself, ref path) => {
+ let opt_self_ty = maybe_qself.as_ref().map(|qself| {
+ self.to_ty(&qself.ty)
+ });
+
+ let path_res = if let Some(&d) = tcx.def_map.borrow().get(&id) {
+ d
+ } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself {
+ // Create some fake resolution that can't possibly be a type.
+ def::PathResolution {
+ base_def: Def::Mod(tcx.map.local_def_id(ast::CRATE_NODE_ID)),
+ depth: path.segments.len()
+ }
+ } else {
+ span_bug!(expr.span, "unbound path {:?}", expr)
+ };
+
+ if let Some((opt_ty, segments, def)) =
+ self.resolve_ty_and_def_ufcs(path_res, opt_self_ty, path,
+ expr.span, expr.id) {
+ if def != Def::Err {
+ let (scheme, predicates) = self.type_scheme_and_predicates_for_def(expr.span,
+ def);
+ self.instantiate_path(segments, scheme, &predicates,
+ opt_ty, def, expr.span, id);
+ } else {
+ self.set_tainted_by_errors();
+ self.write_ty(id, self.tcx.types.err);
+ }
+ }
- // We always require that the type provided as the value for
- // a type parameter outlives the moment of instantiation.
- fcx.opt_node_ty_substs(expr.id, |item_substs| {
- fcx.add_wf_bounds(&item_substs.substs, expr);
- });
- }
- hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
- for output in outputs {
- check_expr(fcx, output);
+ // We always require that the type provided as the value for
+ // a type parameter outlives the moment of instantiation.
+ self.opt_node_ty_substs(expr.id, |item_substs| {
+ self.add_wf_bounds(&item_substs.substs, expr);
+ });
}
- for input in inputs {
- check_expr(fcx, input);
+ hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
+ for output in outputs {
+ self.check_expr(output);
+ }
+ for input in inputs {
+ self.check_expr(input);
+ }
+ self.write_nil(id);
}
- fcx.write_nil(id);
- }
- hir::ExprBreak(_) => { fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); }
- hir::ExprAgain(_) => { fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); }
- hir::ExprRet(ref expr_opt) => {
- match fcx.ret_ty {
- ty::FnConverging(result_type) => {
- match *expr_opt {
- None =>
- if let Err(_) = fcx.mk_eqty(false, TypeOrigin::Misc(expr.span),
- result_type, fcx.tcx().mk_nil()) {
+ hir::ExprBreak(_) => { self.write_ty(id, self.next_diverging_ty_var()); }
+ hir::ExprAgain(_) => { self.write_ty(id, self.next_diverging_ty_var()); }
+ hir::ExprRet(ref expr_opt) => {
+ match self.ret_ty {
+ ty::FnConverging(result_type) => {
+ if let Some(ref e) = *expr_opt {
+ self.check_expr_coercable_to_type(&e, result_type);
+ } else {
+ let eq_result = self.eq_types(false,
+ TypeOrigin::Misc(expr.span),
+ result_type,
+ tcx.mk_nil())
+ // FIXME(#32730) propagate obligations
+ .map(|InferOk { obligations, .. }| assert!(obligations.is_empty()));
+ if eq_result.is_err() {
span_err!(tcx.sess, expr.span, E0069,
- "`return;` in a function whose return type is \
- not `()`");
- },
- Some(ref e) => {
- check_expr_coercable_to_type(fcx, &e, result_type);
+ "`return;` in a function whose return type is not `()`");
+ }
}
}
- }
- ty::FnDiverging => {
- if let Some(ref e) = *expr_opt {
- check_expr(fcx, &e);
+ ty::FnDiverging => {
+ if let Some(ref e) = *expr_opt {
+ self.check_expr(&e);
+ }
+ span_err!(tcx.sess, expr.span, E0166,
+ "`return` in a function declared as diverging");
}
- span_err!(tcx.sess, expr.span, E0166,
- "`return` in a function declared as diverging");
}
- }
- fcx.write_ty(id, fcx.infcx().next_diverging_ty_var());
- }
- hir::ExprAssign(ref lhs, ref rhs) => {
- check_expr_with_lvalue_pref(fcx, &lhs, PreferMutLvalue);
-
- let tcx = fcx.tcx();
- if !tcx.expr_is_lval(&lhs) {
- span_err!(tcx.sess, expr.span, E0070,
- "invalid left-hand side expression");
- }
+ self.write_ty(id, self.next_diverging_ty_var());
+ }
+ hir::ExprAssign(ref lhs, ref rhs) => {
+ self.check_expr_with_lvalue_pref(&lhs, PreferMutLvalue);
- let lhs_ty = fcx.expr_ty(&lhs);
- check_expr_coercable_to_type(fcx, &rhs, lhs_ty);
- let rhs_ty = fcx.expr_ty(&rhs);
+ let tcx = self.tcx;
+ if !tcx.expr_is_lval(&lhs) {
+ span_err!(tcx.sess, expr.span, E0070,
+ "invalid left-hand side expression");
+ }
- fcx.require_expr_have_sized_type(&lhs, traits::AssignmentLhsSized);
+ let lhs_ty = self.expr_ty(&lhs);
+ self.check_expr_coercable_to_type(&rhs, lhs_ty);
+ let rhs_ty = self.expr_ty(&rhs);
- if lhs_ty.references_error() || rhs_ty.references_error() {
- fcx.write_error(id);
- } else {
- fcx.write_nil(id);
- }
- }
- hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => {
- check_then_else(fcx, &cond, &then_blk, opt_else_expr.as_ref().map(|e| &**e),
- id, expr.span, expected);
- }
- hir::ExprWhile(ref cond, ref body, _) => {
- check_expr_has_type(fcx, &cond, tcx.types.bool);
- check_block_no_value(fcx, &body);
- let cond_ty = fcx.expr_ty(&cond);
- let body_ty = fcx.node_ty(body.id);
- if cond_ty.references_error() || body_ty.references_error() {
- fcx.write_error(id);
- }
- else {
- fcx.write_nil(id);
- }
- }
- hir::ExprLoop(ref body, _) => {
- check_block_no_value(fcx, &body);
- if !may_break(tcx, expr.id, &body) {
- fcx.write_ty(id, fcx.infcx().next_diverging_ty_var());
- } else {
- fcx.write_nil(id);
- }
- }
- hir::ExprMatch(ref discrim, ref arms, match_src) => {
- _match::check_match(fcx, expr, &discrim, arms, expected, match_src);
- }
- hir::ExprClosure(capture, ref decl, ref body, _) => {
- closure::check_expr_closure(fcx, expr, capture, &decl, &body, expected);
- }
- hir::ExprBlock(ref b) => {
- check_block_with_expected(fcx, &b, expected);
- fcx.write_ty(id, fcx.node_ty(b.id));
- }
- hir::ExprCall(ref callee, ref args) => {
- callee::check_call(fcx, expr, &callee, &args[..], expected);
+ self.require_expr_have_sized_type(&lhs, traits::AssignmentLhsSized);
- // we must check that return type of called functions is WF:
- let ret_ty = fcx.expr_ty(expr);
- fcx.register_wf_obligation(ret_ty, expr.span, traits::MiscObligation);
- }
- hir::ExprMethodCall(name, ref tps, ref args) => {
- check_method_call(fcx, expr, name, &args[..], &tps[..], expected, lvalue_pref);
- let arg_tys = args.iter().map(|a| fcx.expr_ty(&a));
- let args_err = arg_tys.fold(false, |rest_err, a| rest_err || a.references_error());
- if args_err {
- fcx.write_error(id);
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ self.write_error(id);
+ } else {
+ self.write_nil(id);
+ }
}
- }
- hir::ExprCast(ref e, ref t) => {
- if let hir::TyFixedLengthVec(_, ref count_expr) = t.node {
- check_expr_with_hint(fcx, &count_expr, tcx.types.usize);
- }
-
- // Find the type of `e`. Supply hints based on the type we are casting to,
- // if appropriate.
- let t_cast = fcx.to_ty(t);
- let t_cast = fcx.infcx().resolve_type_vars_if_possible(&t_cast);
- check_expr_with_expectation(fcx, e, ExpectCastableToType(t_cast));
- let t_expr = fcx.expr_ty(e);
- let t_cast = fcx.infcx().resolve_type_vars_if_possible(&t_cast);
-
- // Eagerly check for some obvious errors.
- if t_expr.references_error() || t_cast.references_error() {
- fcx.write_error(id);
- } else {
- // Write a type for the whole expression, assuming everything is going
- // to work out Ok.
- fcx.write_ty(id, t_cast);
-
- // Defer other checks until we're done type checking.
- let mut deferred_cast_checks = fcx.inh.deferred_cast_checks.borrow_mut();
- match cast::CastCheck::new(fcx, e, t_expr, t_cast, t.span, expr.span) {
- Ok(cast_check) => {
- deferred_cast_checks.push(cast_check);
- }
- Err(ErrorReported) => {
- fcx.write_error(id);
- }
+ hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => {
+ self.check_then_else(&cond, &then_blk, opt_else_expr.as_ref().map(|e| &**e),
+ id, expr.span, expected);
+ }
+ hir::ExprWhile(ref cond, ref body, _) => {
+ self.check_expr_has_type(&cond, tcx.types.bool);
+ self.check_block_no_value(&body);
+ let cond_ty = self.expr_ty(&cond);
+ let body_ty = self.node_ty(body.id);
+ if cond_ty.references_error() || body_ty.references_error() {
+ self.write_error(id);
}
- }
- }
- hir::ExprType(ref e, ref t) => {
- let typ = fcx.to_ty(&t);
- check_expr_eq_type(fcx, &e, typ);
- fcx.write_ty(id, typ);
- }
- hir::ExprVec(ref args) => {
- let uty = expected.to_option(fcx).and_then(|uty| {
- match uty.sty {
- ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
- _ => None
+ else {
+ self.write_nil(id);
}
- });
-
- let mut unified = fcx.infcx().next_ty_var();
- let coerce_to = uty.unwrap_or(unified);
+ }
+ hir::ExprLoop(ref body, _) => {
+ self.check_block_no_value(&body);
+ if !may_break(tcx, expr.id, &body) {
+ self.write_ty(id, self.next_diverging_ty_var());
+ } else {
+ self.write_nil(id);
+ }
+ }
+ hir::ExprMatch(ref discrim, ref arms, match_src) => {
+ self.check_match(expr, &discrim, arms, expected, match_src);
+ }
+ hir::ExprClosure(capture, ref decl, ref body, _) => {
+ self.check_expr_closure(expr, capture, &decl, &body, expected);
+ }
+ hir::ExprBlock(ref b) => {
+ self.check_block_with_expected(&b, expected);
+ self.write_ty(id, self.node_ty(b.id));
+ }
+ hir::ExprCall(ref callee, ref args) => {
+ self.check_call(expr, &callee, &args[..], expected);
- for (i, e) in args.iter().enumerate() {
- check_expr_with_hint(fcx, e, coerce_to);
- let e_ty = fcx.expr_ty(e);
- let origin = TypeOrigin::Misc(e.span);
+ // we must check that return type of called functions is WF:
+ let ret_ty = self.expr_ty(expr);
+ self.register_wf_obligation(ret_ty, expr.span, traits::MiscObligation);
+ }
+ hir::ExprMethodCall(name, ref tps, ref args) => {
+ self.check_method_call(expr, name, &args[..], &tps[..], expected, lvalue_pref);
+ let arg_tys = args.iter().map(|a| self.expr_ty(&a));
+ let args_err = arg_tys.fold(false, |rest_err, a| rest_err || a.references_error());
+ if args_err {
+ self.write_error(id);
+ }
+ }
+ hir::ExprCast(ref e, ref t) => {
+ if let hir::TyFixedLengthVec(_, ref count_expr) = t.node {
+ self.check_expr_with_hint(&count_expr, tcx.types.usize);
+ }
- // Special-case the first element, as it has no "previous expressions".
- let result = if i == 0 {
- coercion::try(fcx, e, coerce_to)
+ // Find the type of `e`. Supply hints based on the type we are casting to,
+ // if appropriate.
+ let t_cast = self.to_ty(t);
+ let t_cast = self.resolve_type_vars_if_possible(&t_cast);
+ self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
+ let t_expr = self.expr_ty(e);
+ let t_cast = self.resolve_type_vars_if_possible(&t_cast);
+
+ // Eagerly check for some obvious errors.
+ if t_expr.references_error() || t_cast.references_error() {
+ self.write_error(id);
} else {
- let prev_elems = || args[..i].iter().map(|e| &**e);
- coercion::try_find_lub(fcx, origin, prev_elems, unified, e)
- };
-
- match result {
- Ok(ty) => unified = ty,
- Err(e) => {
- fcx.infcx().report_mismatched_types(origin, unified, e_ty, e);
+ // Write a type for the whole expression, assuming everything is going
+ // to work out Ok.
+ self.write_ty(id, t_cast);
+
+ // Defer other checks until we're done type checking.
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+ match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) {
+ Ok(cast_check) => {
+ deferred_cast_checks.push(cast_check);
+ }
+ Err(ErrorReported) => {
+ self.write_error(id);
+ }
}
}
- }
- fcx.write_ty(id, tcx.mk_array(unified, args.len()));
- }
- hir::ExprRepeat(ref element, ref count_expr) => {
- check_expr_has_type(fcx, &count_expr, tcx.types.usize);
- let count = eval_repeat_count(fcx.tcx(), &count_expr);
-
- let uty = match expected {
- ExpectHasType(uty) => {
+ }
+ hir::ExprType(ref e, ref t) => {
+ let typ = self.to_ty(&t);
+ self.check_expr_eq_type(&e, typ);
+ self.write_ty(id, typ);
+ }
+ hir::ExprVec(ref args) => {
+ let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
_ => None
}
- }
- _ => None
- };
+ });
- let (element_ty, t) = match uty {
- Some(uty) => {
- check_expr_coercable_to_type(fcx, &element, uty);
- (uty, uty)
- }
- None => {
- let t: Ty = fcx.infcx().next_ty_var();
- check_expr_has_type(fcx, &element, t);
- (fcx.expr_ty(&element), t)
- }
- };
+ let mut unified = self.next_ty_var();
+ let coerce_to = uty.unwrap_or(unified);
- if count > 1 {
- // For [foo, ..n] where n > 1, `foo` must have
- // Copy type:
- fcx.require_type_meets(
- t,
- expr.span,
- traits::RepeatVec,
- ty::BoundCopy);
- }
+ for (i, e) in args.iter().enumerate() {
+ self.check_expr_with_hint(e, coerce_to);
+ let e_ty = self.expr_ty(e);
+ let origin = TypeOrigin::Misc(e.span);
- if element_ty.references_error() {
- fcx.write_error(id);
- } else {
- let t = tcx.mk_array(t, count);
- fcx.write_ty(id, t);
- }
- }
- hir::ExprTup(ref elts) => {
- let flds = expected.only_has_type(fcx).and_then(|ty| {
- match ty.sty {
- ty::TyTuple(ref flds) => Some(&flds[..]),
- _ => None
- }
- });
- let mut err_field = false;
-
- let elt_ts = elts.iter().enumerate().map(|(i, e)| {
- let t = match flds {
- Some(ref fs) if i < fs.len() => {
- let ety = fs[i];
- check_expr_coercable_to_type(fcx, &e, ety);
- ety
+ // Special-case the first element, as it has no "previous expressions".
+ let result = if i == 0 {
+ self.try_coerce(e, coerce_to)
+ } else {
+ let prev_elems = || args[..i].iter().map(|e| &**e);
+ self.try_find_coercion_lub(origin, prev_elems, unified, e)
+ };
+
+ match result {
+ Ok(ty) => unified = ty,
+ Err(e) => {
+ self.report_mismatched_types(origin, unified, e_ty, e);
+ }
}
- _ => {
- check_expr_with_expectation(fcx, &e, NoExpectation);
- fcx.expr_ty(&e)
+ }
+ self.write_ty(id, tcx.mk_array(unified, args.len()));
+ }
+ hir::ExprRepeat(ref element, ref count_expr) => {
+ self.check_expr_has_type(&count_expr, tcx.types.usize);
+ let count = eval_repeat_count(self.tcx.global_tcx(), &count_expr);
+
+ let uty = match expected {
+ ExpectHasType(uty) => {
+ match uty.sty {
+ ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
+ _ => None
+ }
}
+ _ => None
};
- err_field = err_field || t.references_error();
- t
- }).collect();
- if err_field {
- fcx.write_error(id);
- } else {
- let typ = tcx.mk_tup(elt_ts);
- fcx.write_ty(id, typ);
- }
- }
- hir::ExprStruct(ref path, ref fields, ref base_expr) => {
- check_expr_struct(fcx, expr, path, fields, base_expr);
- fcx.require_expr_have_sized_type(expr, traits::StructInitializerSized);
- }
- hir::ExprField(ref base, ref field) => {
- check_field(fcx, expr, lvalue_pref, &base, field);
- }
- hir::ExprTupField(ref base, idx) => {
- check_tup_field(fcx, expr, lvalue_pref, &base, idx);
- }
- hir::ExprIndex(ref base, ref idx) => {
- check_expr_with_lvalue_pref(fcx, &base, lvalue_pref);
- check_expr(fcx, &idx);
-
- let base_t = fcx.expr_ty(&base);
- let idx_t = fcx.expr_ty(&idx);
-
- if base_t.references_error() {
- fcx.write_ty(id, base_t);
- } else if idx_t.references_error() {
- fcx.write_ty(id, idx_t);
- } else {
- let base_t = structurally_resolved_type(fcx, expr.span, base_t);
- match lookup_indexing(fcx, expr, base, base_t, idx_t, lvalue_pref) {
- Some((index_ty, element_ty)) => {
- let idx_expr_ty = fcx.expr_ty(idx);
- demand::eqtype(fcx, expr.span, index_ty, idx_expr_ty);
- fcx.write_ty(id, element_ty);
- }
- None => {
- check_expr_has_type(fcx, &idx, fcx.tcx().types.err);
- fcx.type_error_message(
- expr.span,
- |actual| {
- format!("cannot index a value of type `{}`",
- actual)
- },
- base_t,
- None);
- fcx.write_ty(id, fcx.tcx().types.err);
- }
- }
- }
- }
- }
-
- debug!("type of expr({}) {} is...", expr.id,
- pprust::expr_to_string(expr));
- debug!("... {:?}, expected is {:?}",
- fcx.expr_ty(expr),
- expected);
-}
+ let (element_ty, t) = match uty {
+ Some(uty) => {
+ self.check_expr_coercable_to_type(&element, uty);
+ (uty, uty)
+ }
+ None => {
+ let t: Ty = self.next_ty_var();
+ self.check_expr_has_type(&element, t);
+ (self.expr_ty(&element), t)
+ }
+ };
-pub fn resolve_ty_and_def_ufcs<'a, 'b, 'tcx>(fcx: &FnCtxt<'b, 'tcx>,
- path_res: def::PathResolution,
- opt_self_ty: Option<Ty<'tcx>>,
- path: &'a hir::Path,
- span: Span,
- node_id: ast::NodeId)
- -> Option<(Option<Ty<'tcx>>,
- &'a [hir::PathSegment],
- Def)>
-{
+ if count > 1 {
+ // For [foo, ..n] where n > 1, `foo` must have
+ // Copy type:
+ self.require_type_meets(t, expr.span, traits::RepeatVec, ty::BoundCopy);
+ }
- // If fully resolved already, we don't have to do anything.
- if path_res.depth == 0 {
- Some((opt_self_ty, &path.segments, path_res.base_def))
- } else {
- let mut def = path_res.base_def;
- let ty_segments = path.segments.split_last().unwrap().1;
- let base_ty_end = path.segments.len() - path_res.depth;
- let ty = astconv::finish_resolving_def_to_ty(fcx, fcx, span,
- PathParamMode::Optional,
- &mut def,
- opt_self_ty,
- &ty_segments[..base_ty_end],
- &ty_segments[base_ty_end..]);
- let item_segment = path.segments.last().unwrap();
- let item_name = item_segment.identifier.name;
- let def = match method::resolve_ufcs(fcx, span, item_name, ty, node_id) {
- Ok(def) => Some(def),
- Err(error) => {
- let def = match error {
- method::MethodError::PrivateMatch(def) => Some(def),
- _ => None,
- };
- if item_name != keywords::Invalid.name() {
- method::report_error(fcx, span, ty, item_name, None, error);
+ if element_ty.references_error() {
+ self.write_error(id);
+ } else {
+ let t = tcx.mk_array(t, count);
+ self.write_ty(id, t);
+ }
+ }
+ hir::ExprTup(ref elts) => {
+ let flds = expected.only_has_type(self).and_then(|ty| {
+ match ty.sty {
+ ty::TyTuple(ref flds) => Some(&flds[..]),
+ _ => None
}
- def
+ });
+ let mut err_field = false;
+
+ let elt_ts = elts.iter().enumerate().map(|(i, e)| {
+ let t = match flds {
+ Some(ref fs) if i < fs.len() => {
+ let ety = fs[i];
+ self.check_expr_coercable_to_type(&e, ety);
+ ety
+ }
+ _ => {
+ self.check_expr_with_expectation(&e, NoExpectation);
+ self.expr_ty(&e)
+ }
+ };
+ err_field = err_field || t.references_error();
+ t
+ }).collect();
+ if err_field {
+ self.write_error(id);
+ } else {
+ let typ = tcx.mk_tup(elt_ts);
+ self.write_ty(id, typ);
}
- };
+ }
+ hir::ExprStruct(ref path, ref fields, ref base_expr) => {
+ self.check_expr_struct(expr, path, fields, base_expr);
- if let Some(def) = def {
- // Write back the new resolution.
- fcx.ccx.tcx.def_map.borrow_mut().insert(node_id, def::PathResolution {
- base_def: def,
- depth: 0,
- });
- Some((Some(ty), slice::ref_slice(item_segment), def))
- } else {
- fcx.write_error(node_id);
- None
- }
- }
-}
+ self.require_expr_have_sized_type(expr, traits::StructInitializerSized);
+ }
+ hir::ExprField(ref base, ref field) => {
+ self.check_field(expr, lvalue_pref, &base, field);
+ }
+ hir::ExprTupField(ref base, idx) => {
+ self.check_tup_field(expr, lvalue_pref, &base, idx);
+ }
+ hir::ExprIndex(ref base, ref idx) => {
+ self.check_expr_with_lvalue_pref(&base, lvalue_pref);
+ self.check_expr(&idx);
-impl<'tcx> Expectation<'tcx> {
- /// Provide an expectation for an rvalue expression given an *optional*
- /// hint, which is not required for type safety (the resulting type might
- /// be checked higher up, as is the case with `&expr` and `box expr`), but
- /// is useful in determining the concrete type.
- ///
- /// The primary use case is where the expected type is a fat pointer,
- /// like `&[isize]`. For example, consider the following statement:
- ///
- /// let x: &[isize] = &[1, 2, 3];
- ///
- /// In this case, the expected type for the `&[1, 2, 3]` expression is
- /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
- /// expectation `ExpectHasType([isize])`, that would be too strong --
- /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
- /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
- /// to the type `&[isize]`. Therefore, we propagate this more limited hint,
- /// which still is useful, because it informs integer literals and the like.
- /// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
- /// for examples of where this comes up,.
- fn rvalue_hint(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
- match tcx.struct_tail(ty).sty {
- ty::TySlice(_) | ty::TyStr | ty::TyTrait(..) => {
- ExpectRvalueLikeUnsized(ty)
- }
- _ => ExpectHasType(ty)
- }
- }
+ let base_t = self.expr_ty(&base);
+ let idx_t = self.expr_ty(&idx);
- // Resolves `expected` by a single level if it is a variable. If
- // there is no expected type or resolution is not possible (e.g.,
- // no constraints yet present), just returns `None`.
- fn resolve<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
- match self {
- NoExpectation => {
- NoExpectation
- }
- ExpectCastableToType(t) => {
- ExpectCastableToType(
- fcx.infcx().resolve_type_vars_if_possible(&t))
- }
- ExpectHasType(t) => {
- ExpectHasType(
- fcx.infcx().resolve_type_vars_if_possible(&t))
- }
- ExpectRvalueLikeUnsized(t) => {
- ExpectRvalueLikeUnsized(
- fcx.infcx().resolve_type_vars_if_possible(&t))
- }
+ if base_t.references_error() {
+ self.write_ty(id, base_t);
+ } else if idx_t.references_error() {
+ self.write_ty(id, idx_t);
+ } else {
+ let base_t = self.structurally_resolved_type(expr.span, base_t);
+ match self.lookup_indexing(expr, base, base_t, idx_t, lvalue_pref) {
+ Some((index_ty, element_ty)) => {
+ let idx_expr_ty = self.expr_ty(idx);
+ self.demand_eqtype(expr.span, index_ty, idx_expr_ty);
+ self.write_ty(id, element_ty);
+ }
+ None => {
+ self.check_expr_has_type(&idx, self.tcx.types.err);
+ let mut err = self.type_error_struct(
+ expr.span,
+ |actual| {
+ format!("cannot index a value of type `{}`",
+ actual)
+ },
+ base_t,
+ None);
+ // Try to give some advice about indexing tuples.
+ if let ty::TyTuple(_) = base_t.sty {
+ let mut needs_note = true;
+ // If the index is an integer, we can show the actual
+ // fixed expression:
+ if let hir::ExprLit(ref lit) = idx.node {
+ if let ast::LitKind::Int(i,
+ ast::LitIntType::Unsuffixed) = lit.node {
+ let snip = tcx.sess.codemap().span_to_snippet(base.span);
+ if let Ok(snip) = snip {
+ err.span_suggestion(expr.span,
+ "to access tuple elements, \
+ use tuple indexing syntax \
+ as shown",
+ format!("{}.{}", snip, i));
+ needs_note = false;
+ }
+ }
+ }
+ if needs_note {
+ err.help("to access tuple elements, use tuple indexing \
+ syntax (e.g. `tuple.0`)");
+ }
+ }
+ err.emit();
+ self.write_ty(id, self.tcx().types.err);
+ }
+ }
+ }
+ }
}
- }
- fn to_option<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
- match self.resolve(fcx) {
- NoExpectation => None,
- ExpectCastableToType(ty) |
- ExpectHasType(ty) |
- ExpectRvalueLikeUnsized(ty) => Some(ty),
- }
+ debug!("type of expr({}) {} is...", expr.id,
+ pprust::expr_to_string(expr));
+ debug!("... {:?}, expected is {:?}",
+ self.expr_ty(expr),
+ expected);
}
- fn only_has_type<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
- match self.resolve(fcx) {
- ExpectHasType(ty) => Some(ty),
- _ => None
+ pub fn resolve_ty_and_def_ufcs<'b>(&self,
+ path_res: def::PathResolution,
+ opt_self_ty: Option<Ty<'tcx>>,
+ path: &'b hir::Path,
+ span: Span,
+ node_id: ast::NodeId)
+ -> Option<(Option<Ty<'tcx>>, &'b [hir::PathSegment], Def)>
+ {
+
+ // If fully resolved already, we don't have to do anything.
+ if path_res.depth == 0 {
+ Some((opt_self_ty, &path.segments, path_res.base_def))
+ } else {
+ let mut def = path_res.base_def;
+ let ty_segments = path.segments.split_last().unwrap().1;
+ let base_ty_end = path.segments.len() - path_res.depth;
+ let ty = AstConv::finish_resolving_def_to_ty(self, self, span,
+ PathParamMode::Optional,
+ &mut def,
+ opt_self_ty,
+ &ty_segments[..base_ty_end],
+ &ty_segments[base_ty_end..]);
+ let item_segment = path.segments.last().unwrap();
+ let item_name = item_segment.identifier.name;
+ let def = match self.resolve_ufcs(span, item_name, ty, node_id) {
+ Ok(def) => Some(def),
+ Err(error) => {
+ let def = match error {
+ method::MethodError::PrivateMatch(def) => Some(def),
+ _ => None,
+ };
+ if item_name != keywords::Invalid.name() {
+ self.report_method_error(span, ty, item_name, None, error);
+ }
+ def
+ }
+ };
+
+ if let Some(def) = def {
+ // Write back the new resolution.
+ self.tcx().def_map.borrow_mut().insert(node_id, def::PathResolution {
+ base_def: def,
+ depth: 0,
+ });
+ Some((Some(ty), slice::ref_slice(item_segment), def))
+ } else {
+ self.write_error(node_id);
+ None
+ }
}
}
-}
-pub fn check_decl_initializer<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- local: &'tcx hir::Local,
- init: &'tcx hir::Expr)
-{
- let ref_bindings = fcx.tcx().pat_contains_ref_binding(&local.pat);
-
- let local_ty = fcx.local_ty(init.span, local.id);
- if let Some(m) = ref_bindings {
- // Somewhat subtle: if we have a `ref` binding in the pattern,
- // we want to avoid introducing coercions for the RHS. This is
- // both because it helps preserve sanity and, in the case of
- // ref mut, for soundness (issue #23116). In particular, in
- // the latter case, we need to be clear that the type of the
- // referent for the reference that results is *equal to* the
- // type of the lvalue it is referencing, and not some
- // supertype thereof.
- check_expr_with_lvalue_pref(fcx, init, LvaluePreference::from_mutbl(m));
- let init_ty = fcx.expr_ty(init);
- demand::eqtype(fcx, init.span, init_ty, local_ty);
- } else {
- check_expr_coercable_to_type(fcx, init, local_ty)
- };
-}
+ pub fn check_decl_initializer(&self,
+ local: &'gcx hir::Local,
+ init: &'gcx hir::Expr)
+ {
+ let ref_bindings = self.tcx.pat_contains_ref_binding(&local.pat);
+
+ let local_ty = self.local_ty(init.span, local.id);
+ if let Some(m) = ref_bindings {
+ // Somewhat subtle: if we have a `ref` binding in the pattern,
+ // we want to avoid introducing coercions for the RHS. This is
+ // both because it helps preserve sanity and, in the case of
+ // ref mut, for soundness (issue #23116). In particular, in
+ // the latter case, we need to be clear that the type of the
+ // referent for the reference that results is *equal to* the
+ // type of the lvalue it is referencing, and not some
+ // supertype thereof.
+ self.check_expr_with_lvalue_pref(init, LvaluePreference::from_mutbl(m));
+ let init_ty = self.expr_ty(init);
+ self.demand_eqtype(init.span, init_ty, local_ty);
+ } else {
+ self.check_expr_coercable_to_type(init, local_ty)
+ };
+ }
-pub fn check_decl_local<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, local: &'tcx hir::Local) {
- let tcx = fcx.ccx.tcx;
+ pub fn check_decl_local(&self, local: &'gcx hir::Local) {
+ let tcx = self.tcx;
- let t = fcx.local_ty(local.span, local.id);
- fcx.write_ty(local.id, t);
+ let t = self.local_ty(local.span, local.id);
+ self.write_ty(local.id, t);
- if let Some(ref init) = local.init {
- check_decl_initializer(fcx, local, &init);
- let init_ty = fcx.expr_ty(&init);
- if init_ty.references_error() {
- fcx.write_ty(local.id, init_ty);
+ if let Some(ref init) = local.init {
+ self.check_decl_initializer(local, &init);
+ let init_ty = self.expr_ty(&init);
+ if init_ty.references_error() {
+ self.write_ty(local.id, init_ty);
+ }
}
- }
-
- let pcx = pat_ctxt {
- fcx: fcx,
- map: pat_id_map(&tcx.def_map, &local.pat),
- };
- _match::check_pat(&pcx, &local.pat, t);
- let pat_ty = fcx.node_ty(local.pat.id);
- if pat_ty.references_error() {
- fcx.write_ty(local.id, pat_ty);
- }
-}
-pub fn check_stmt<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, stmt: &'tcx hir::Stmt) {
- let node_id;
- let mut saw_bot = false;
- let mut saw_err = false;
- match stmt.node {
- hir::StmtDecl(ref decl, id) => {
- node_id = id;
- match decl.node {
- hir::DeclLocal(ref l) => {
- check_decl_local(fcx, &l);
- let l_t = fcx.node_ty(l.id);
- saw_bot = saw_bot || fcx.infcx().type_var_diverges(l_t);
- saw_err = saw_err || l_t.references_error();
+ let pcx = PatCtxt {
+ fcx: self,
+ map: pat_id_map(&tcx.def_map, &local.pat),
+ };
+ pcx.check_pat(&local.pat, t);
+ let pat_ty = self.node_ty(local.pat.id);
+ if pat_ty.references_error() {
+ self.write_ty(local.id, pat_ty);
+ }
+ }
+
+ pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) {
+ let node_id;
+ let mut saw_bot = false;
+ let mut saw_err = false;
+ match stmt.node {
+ hir::StmtDecl(ref decl, id) => {
+ node_id = id;
+ match decl.node {
+ hir::DeclLocal(ref l) => {
+ self.check_decl_local(&l);
+ let l_t = self.node_ty(l.id);
+ saw_bot = saw_bot || self.type_var_diverges(l_t);
+ saw_err = saw_err || l_t.references_error();
+ }
+ hir::DeclItem(_) => {/* ignore for now */ }
+ }
+ }
+ hir::StmtExpr(ref expr, id) => {
+ node_id = id;
+ // Check with expected type of ()
+ self.check_expr_has_type(&expr, self.tcx.mk_nil());
+ let expr_ty = self.expr_ty(&expr);
+ saw_bot = saw_bot || self.type_var_diverges(expr_ty);
+ saw_err = saw_err || expr_ty.references_error();
+ }
+ hir::StmtSemi(ref expr, id) => {
+ node_id = id;
+ self.check_expr(&expr);
+ let expr_ty = self.expr_ty(&expr);
+ saw_bot |= self.type_var_diverges(expr_ty);
+ saw_err |= expr_ty.references_error();
}
- hir::DeclItem(_) => {/* ignore for now */ }
}
- }
- hir::StmtExpr(ref expr, id) => {
- node_id = id;
- // Check with expected type of ()
- check_expr_has_type(fcx, &expr, fcx.tcx().mk_nil());
- let expr_ty = fcx.expr_ty(&expr);
- saw_bot = saw_bot || fcx.infcx().type_var_diverges(expr_ty);
- saw_err = saw_err || expr_ty.references_error();
- }
- hir::StmtSemi(ref expr, id) => {
- node_id = id;
- check_expr(fcx, &expr);
- let expr_ty = fcx.expr_ty(&expr);
- saw_bot |= fcx.infcx().type_var_diverges(expr_ty);
- saw_err |= expr_ty.references_error();
- }
- }
- if saw_bot {
- fcx.write_ty(node_id, fcx.infcx().next_diverging_ty_var());
- }
- else if saw_err {
- fcx.write_error(node_id);
- }
- else {
- fcx.write_nil(node_id)
+ if saw_bot {
+ self.write_ty(node_id, self.next_diverging_ty_var());
+ }
+ else if saw_err {
+ self.write_error(node_id);
+ }
+ else {
+ self.write_nil(node_id)
+ }
}
-}
-pub fn check_block_no_value<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, blk: &'tcx hir::Block) {
- check_block_with_expected(fcx, blk, ExpectHasType(fcx.tcx().mk_nil()));
- let blkty = fcx.node_ty(blk.id);
- if blkty.references_error() {
- fcx.write_error(blk.id);
- } else {
- let nilty = fcx.tcx().mk_nil();
- demand::suptype(fcx, blk.span, nilty, blkty);
+ pub fn check_block_no_value(&self, blk: &'gcx hir::Block) {
+ self.check_block_with_expected(blk, ExpectHasType(self.tcx.mk_nil()));
+ let blkty = self.node_ty(blk.id);
+ if blkty.references_error() {
+ self.write_error(blk.id);
+ } else {
+ let nilty = self.tcx.mk_nil();
+ self.demand_suptype(blk.span, nilty, blkty);
+ }
}
-}
-fn check_block_with_expected<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- blk: &'tcx hir::Block,
- expected: Expectation<'tcx>) {
- let prev = {
- let mut fcx_ps = fcx.ps.borrow_mut();
- let unsafety_state = fcx_ps.recurse(blk);
- replace(&mut *fcx_ps, unsafety_state)
- };
+ fn check_block_with_expected(&self,
+ blk: &'gcx hir::Block,
+ expected: Expectation<'tcx>) {
+ let prev = {
+ let mut fcx_ps = self.ps.borrow_mut();
+ let unsafety_state = fcx_ps.recurse(blk);
+ replace(&mut *fcx_ps, unsafety_state)
+ };
- let mut warned = false;
- let mut any_diverges = false;
- let mut any_err = false;
- for s in &blk.stmts {
- check_stmt(fcx, s);
- let s_id = s.node.id();
- let s_ty = fcx.node_ty(s_id);
- if any_diverges && !warned && match s.node {
- hir::StmtDecl(ref decl, _) => {
- match decl.node {
- hir::DeclLocal(_) => true,
- _ => false,
+ let mut warned = false;
+ let mut any_diverges = false;
+ let mut any_err = false;
+ for s in &blk.stmts {
+ self.check_stmt(s);
+ let s_id = s.node.id();
+ let s_ty = self.node_ty(s_id);
+ if any_diverges && !warned && match s.node {
+ hir::StmtDecl(ref decl, _) => {
+ match decl.node {
+ hir::DeclLocal(_) => true,
+ _ => false,
+ }
}
- }
- hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true,
- } {
- fcx.ccx
- .tcx
- .sess
- .add_lint(lint::builtin::UNREACHABLE_CODE,
- s_id,
- s.span,
- "unreachable statement".to_string());
- warned = true;
- }
- any_diverges = any_diverges || fcx.infcx().type_var_diverges(s_ty);
- any_err = any_err || s_ty.references_error();
- }
- match blk.expr {
- None => if any_err {
- fcx.write_error(blk.id);
- } else if any_diverges {
- fcx.write_ty(blk.id, fcx.infcx().next_diverging_ty_var());
- } else {
- fcx.write_nil(blk.id);
- },
- Some(ref e) => {
- if any_diverges && !warned {
- fcx.ccx
- .tcx
+ hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true,
+ } {
+ self.tcx
.sess
.add_lint(lint::builtin::UNREACHABLE_CODE,
- e.id,
- e.span,
- "unreachable expression".to_string());
+ s_id,
+ s.span,
+ "unreachable statement".to_string());
+ warned = true;
}
- let ety = match expected {
- ExpectHasType(ety) => {
- check_expr_coercable_to_type(fcx, &e, ety);
- ety
- }
- _ => {
- check_expr_with_expectation(fcx, &e, expected);
- fcx.expr_ty(&e)
- }
- };
-
- if any_err {
- fcx.write_error(blk.id);
+ any_diverges = any_diverges || self.type_var_diverges(s_ty);
+ any_err = any_err || s_ty.references_error();
+ }
+ match blk.expr {
+ None => if any_err {
+ self.write_error(blk.id);
} else if any_diverges {
- fcx.write_ty(blk.id, fcx.infcx().next_diverging_ty_var());
+ self.write_ty(blk.id, self.next_diverging_ty_var());
} else {
- fcx.write_ty(blk.id, ety);
- }
- }
- };
-
- *fcx.ps.borrow_mut() = prev;
-}
+ self.write_nil(blk.id);
+ },
+ Some(ref e) => {
+ if any_diverges && !warned {
+ self.tcx
+ .sess
+ .add_lint(lint::builtin::UNREACHABLE_CODE,
+ e.id,
+ e.span,
+ "unreachable expression".to_string());
+ }
+ let ety = match expected {
+ ExpectHasType(ety) => {
+ self.check_expr_coercable_to_type(&e, ety);
+ ety
+ }
+ _ => {
+ self.check_expr_with_expectation(&e, expected);
+ self.expr_ty(&e)
+ }
+ };
-/// Checks a constant appearing in a type. At the moment this is just the
-/// length expression in a fixed-length vector, but someday it might be
-/// extended to type-level numeric literals.
-fn check_const_in_type<'a,'tcx>(ccx: &'a CrateCtxt<'a,'tcx>,
- expr: &'tcx hir::Expr,
- expected_type: Ty<'tcx>) {
- let tables = RefCell::new(ty::Tables::empty());
- let inh = static_inherited_fields(ccx, &tables);
- let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(expected_type), expr.id);
- check_const_with_ty(&fcx, expr.span, expr, expected_type);
-}
+ if any_err {
+ self.write_error(blk.id);
+ } else if any_diverges {
+ self.write_ty(blk.id, self.next_diverging_ty_var());
+ } else {
+ self.write_ty(blk.id, ety);
+ }
+ }
+ };
-fn check_const<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
- sp: Span,
- e: &'tcx hir::Expr,
- id: ast::NodeId) {
- let tables = RefCell::new(ty::Tables::empty());
- let inh = static_inherited_fields(ccx, &tables);
- let rty = ccx.tcx.node_id_to_type(id);
- let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), e.id);
- let declty = fcx.ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(id)).ty;
- check_const_with_ty(&fcx, sp, e, declty);
-}
+ *self.ps.borrow_mut() = prev;
+ }
-fn check_const_with_ty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- _: Span,
- e: &'tcx hir::Expr,
- declty: Ty<'tcx>) {
- // Gather locals in statics (because of block expressions).
- // This is technically unnecessary because locals in static items are forbidden,
- // but prevents type checking from blowing up before const checking can properly
- // emit an error.
- GatherLocalsVisitor { fcx: fcx }.visit_expr(e);
-
- check_expr_with_hint(fcx, e, declty);
- demand::coerce(fcx, e.span, declty, e);
-
- fcx.select_all_obligations_and_apply_defaults();
- upvar::closure_analyze_const(&fcx, e);
- fcx.select_obligations_where_possible();
- fcx.check_casts();
- fcx.select_all_obligations_or_error();
-
- regionck::regionck_expr(fcx, e);
- writeback::resolve_type_vars_in_expr(fcx, e);
-}
-/// Checks whether a type can be represented in memory. In particular, it
-/// identifies types that contain themselves without indirection through a
-/// pointer, which would mean their size is unbounded.
-pub fn check_representable(tcx: &TyCtxt,
- sp: Span,
- item_id: ast::NodeId,
- _designation: &str) -> bool {
- let rty = tcx.node_id_to_type(item_id);
+ fn check_const_with_ty(&self,
+ _: Span,
+ e: &'gcx hir::Expr,
+ declty: Ty<'tcx>) {
+ // Gather locals in statics (because of block expressions).
+ // This is technically unnecessary because locals in static items are forbidden,
+ // but prevents type checking from blowing up before const checking can properly
+ // emit an error.
+ GatherLocalsVisitor { fcx: self }.visit_expr(e);
- // Check that it is possible to represent this type. This call identifies
- // (1) types that contain themselves and (2) types that contain a different
- // recursive type. It is only necessary to throw an error on those that
- // contain themselves. For case 2, there must be an inner type that will be
- // caught by case 1.
- match rty.is_representable(tcx, sp) {
- Representability::SelfRecursive => {
- let item_def_id = tcx.map.local_def_id(item_id);
- traits::recursive_type_with_infinite_size_error(tcx, item_def_id).emit();
- return false
- }
- Representability::Representable | Representability::ContainsRecursive => (),
- }
- return true
-}
+ self.check_expr_coercable_to_type(e, declty);
-pub fn check_simd(tcx: &TyCtxt, sp: Span, id: ast::NodeId) {
- let t = tcx.node_id_to_type(id);
- match t.sty {
- ty::TyStruct(def, substs) => {
- let fields = &def.struct_variant().fields;
- if fields.is_empty() {
- span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
- return;
+ self.select_all_obligations_and_apply_defaults();
+ self.closure_analyze_const(e);
+ self.select_obligations_where_possible();
+ self.check_casts();
+ self.select_all_obligations_or_error();
+
+ self.regionck_expr(e);
+ self.resolve_type_vars_in_expr(e);
+ }
+
+ // Returns the type parameter count and the type for the given definition.
+ fn type_scheme_and_predicates_for_def(&self,
+ sp: Span,
+ defn: Def)
+ -> (TypeScheme<'tcx>, GenericPredicates<'tcx>) {
+ match defn {
+ Def::Local(_, nid) | Def::Upvar(_, nid, _, _) => {
+ let typ = self.local_ty(sp, nid);
+ (ty::TypeScheme { generics: ty::Generics::empty(), ty: typ },
+ ty::GenericPredicates::empty())
}
- let e = fields[0].ty(tcx, substs);
- if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
- span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous");
- return;
+ Def::Fn(id) | Def::Method(id) |
+ Def::Static(id, _) | Def::Variant(_, id) |
+ Def::Struct(id) | Def::Const(id) | Def::AssociatedConst(id) => {
+ (self.tcx.lookup_item_type(id), self.tcx.lookup_predicates(id))
}
- match e.sty {
- ty::TyParam(_) => { /* struct<T>(T, T, T, T) is ok */ }
- _ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
- _ => {
- span_err!(tcx.sess, sp, E0077,
- "SIMD vector element type should be machine type");
- return;
- }
+ Def::Trait(_) |
+ Def::Enum(..) |
+ Def::TyAlias(..) |
+ Def::AssociatedTy(..) |
+ Def::PrimTy(_) |
+ Def::TyParam(..) |
+ Def::Mod(..) |
+ Def::ForeignMod(..) |
+ Def::Label(..) |
+ Def::SelfTy(..) |
+ Def::Err => {
+ span_bug!(sp, "expected value, found {:?}", defn);
}
}
- _ => ()
}
-}
-
-pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
- sp: Span,
- vs: &'tcx [hir::Variant],
- id: ast::NodeId) {
- fn do_check<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
- vs: &'tcx [hir::Variant],
- id: ast::NodeId,
- hint: attr::ReprAttr) {
- #![allow(trivial_numeric_casts)]
- let rty = ccx.tcx.node_id_to_type(id);
- let mut disr_vals: Vec<ty::Disr> = Vec::new();
-
- let tables = RefCell::new(ty::Tables::empty());
- let inh = static_inherited_fields(ccx, &tables);
- let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), id);
-
- let repr_type_ty = ccx.tcx.enum_repr_type(Some(&hint)).to_ty(&ccx.tcx);
- for v in vs {
- if let Some(ref e) = v.node.disr_expr {
- check_const_with_ty(&fcx, e.span, e, repr_type_ty);
+ // Instantiates the given path, which must refer to an item with the given
+ // number of type parameters and type.
+ pub fn instantiate_path(&self,
+ segments: &[hir::PathSegment],
+ type_scheme: TypeScheme<'tcx>,
+ type_predicates: &ty::GenericPredicates<'tcx>,
+ opt_self_ty: Option<Ty<'tcx>>,
+ def: Def,
+ span: Span,
+ node_id: ast::NodeId) {
+ debug!("instantiate_path(path={:?}, def={:?}, node_id={}, type_scheme={:?})",
+ segments,
+ def,
+ node_id,
+ type_scheme);
+
+ // We need to extract the type parameters supplied by the user in
+ // the path `path`. Due to the current setup, this is a bit of a
+ // tricky-process; the problem is that resolve only tells us the
+ // end-point of the path resolution, and not the intermediate steps.
+ // Luckily, we can (at least for now) deduce the intermediate steps
+ // just from the end-point.
+ //
+ // There are basically four cases to consider:
+ //
+ // 1. Reference to a *type*, such as a struct or enum:
+ //
+ // mod a { struct Foo<T> { ... } }
+ //
+ // Because we don't allow types to be declared within one
+ // another, a path that leads to a type will always look like
+ // `a::b::Foo<T>` where `a` and `b` are modules. This implies
+ // that only the final segment can have type parameters, and
+ // they are located in the TypeSpace.
+ //
+ // *Note:* Generally speaking, references to types don't
+ // actually pass through this function, but rather the
+ // `ast_ty_to_ty` function in `astconv`. However, in the case
+ // of struct patterns (and maybe literals) we do invoke
+ // `instantiate_path` to get the general type of an instance of
+ // a struct. (In these cases, there are actually no type
+ // parameters permitted at present, but perhaps we will allow
+ // them in the future.)
+ //
+ // 1b. Reference to an enum variant or tuple-like struct:
+ //
+ // struct foo<T>(...)
+ // enum E<T> { foo(...) }
+ //
+ // In these cases, the parameters are declared in the type
+ // space.
+ //
+ // 2. Reference to a *fn item*:
+ //
+ // fn foo<T>() { }
+ //
+ // In this case, the path will again always have the form
+ // `a::b::foo::<T>` where only the final segment should have
+ // type parameters. However, in this case, those parameters are
+ // declared on a value, and hence are in the `FnSpace`.
+ //
+ // 3. Reference to a *method*:
+ //
+ // impl<A> SomeStruct<A> {
+ // fn foo<B>(...)
+ // }
+ //
+ // Here we can have a path like
+ // `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
+ // may appear in two places. The penultimate segment,
+ // `SomeStruct::<A>`, contains parameters in TypeSpace, and the
+ // final segment, `foo::<B>` contains parameters in fn space.
+ //
+ // 4. Reference to an *associated const*:
+ //
+ // impl<A> AnotherStruct<A> {
+ // const FOO: B = BAR;
+ // }
+ //
+ // The path in this case will look like
+ // `a::b::AnotherStruct::<A>::FOO`, so the penultimate segment
+ // only will have parameters in TypeSpace.
+ //
+ // The first step then is to categorize the segments appropriately.
+
+ assert!(!segments.is_empty());
+
+ let mut ufcs_associated = None;
+ let mut segment_spaces: Vec<_>;
+ match def {
+ // Case 1 and 1b. Reference to a *type* or *enum variant*.
+ Def::SelfTy(..) |
+ Def::Struct(..) |
+ Def::Variant(..) |
+ Def::Enum(..) |
+ Def::TyAlias(..) |
+ Def::AssociatedTy(..) |
+ Def::Trait(..) |
+ Def::PrimTy(..) |
+ Def::TyParam(..) => {
+ // Everything but the final segment should have no
+ // parameters at all.
+ segment_spaces = vec![None; segments.len() - 1];
+ segment_spaces.push(Some(subst::TypeSpace));
}
- }
- let def_id = ccx.tcx.map.local_def_id(id);
+ // Case 2. Reference to a top-level value.
+ Def::Fn(..) |
+ Def::Const(..) |
+ Def::Static(..) => {
+ segment_spaces = vec![None; segments.len() - 1];
+ segment_spaces.push(Some(subst::FnSpace));
+ }
- let variants = &ccx.tcx.lookup_adt_def(def_id).variants;
- for (v, variant) in vs.iter().zip(variants.iter()) {
- let current_disr_val = variant.disr_val;
+ // Case 3. Reference to a method.
+ Def::Method(def_id) => {
+ let container = self.tcx.impl_or_trait_item(def_id).container();
+ match container {
+ ty::TraitContainer(trait_did) => {
+ callee::check_legal_trait_for_method_call(self.ccx, span, trait_did)
+ }
+ ty::ImplContainer(_) => {}
+ }
- // Check for duplicate discriminant values
- match disr_vals.iter().position(|&x| x == current_disr_val) {
- Some(i) => {
- let mut err = struct_span_err!(ccx.tcx.sess, v.span, E0081,
- "discriminant value `{}` already exists", disr_vals[i]);
- let variant_i_node_id = ccx.tcx.map.as_local_node_id(variants[i].did).unwrap();
- span_note!(&mut err, ccx.tcx.map.span(variant_i_node_id),
- "conflicting discriminant here");
- err.emit();
+ if segments.len() >= 2 {
+ segment_spaces = vec![None; segments.len() - 2];
+ segment_spaces.push(Some(subst::TypeSpace));
+ segment_spaces.push(Some(subst::FnSpace));
+ } else {
+ // `<T>::method` will end up here, and so can `T::method`.
+ let self_ty = opt_self_ty.expect("UFCS sugared method missing Self");
+ segment_spaces = vec![Some(subst::FnSpace)];
+ ufcs_associated = Some((container, self_ty));
}
- None => {}
}
- disr_vals.push(current_disr_val);
- }
- }
-
- let def_id = ccx.tcx.map.local_def_id(id);
- let hint = *ccx.tcx.lookup_repr_hints(def_id).get(0).unwrap_or(&attr::ReprAny);
- if hint != attr::ReprAny && vs.is_empty() {
- span_err!(ccx.tcx.sess, sp, E0084,
- "unsupported representation for zero-variant enum");
- }
-
- do_check(ccx, vs, id, hint);
-
- check_representable(ccx.tcx, sp, id, "enum");
-}
-
-// Returns the type parameter count and the type for the given definition.
-fn type_scheme_and_predicates_for_def<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- defn: Def)
- -> (TypeScheme<'tcx>, GenericPredicates<'tcx>) {
- match defn {
- Def::Local(_, nid) | Def::Upvar(_, nid, _, _) => {
- let typ = fcx.local_ty(sp, nid);
- (ty::TypeScheme { generics: ty::Generics::empty(), ty: typ },
- ty::GenericPredicates::empty())
- }
- Def::Fn(id) | Def::Method(id) |
- Def::Static(id, _) | Def::Variant(_, id) |
- Def::Struct(id) | Def::Const(id) | Def::AssociatedConst(id) => {
- (fcx.tcx().lookup_item_type(id), fcx.tcx().lookup_predicates(id))
- }
- Def::Trait(_) |
- Def::Enum(..) |
- Def::TyAlias(..) |
- Def::AssociatedTy(..) |
- Def::PrimTy(_) |
- Def::TyParam(..) |
- Def::Mod(..) |
- Def::ForeignMod(..) |
- Def::Label(..) |
- Def::SelfTy(..) |
- Def::Err => {
- span_bug!(sp, "expected value, found {:?}", defn);
- }
- }
-}
-
-// Instantiates the given path, which must refer to an item with the given
-// number of type parameters and type.
-pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- segments: &[hir::PathSegment],
- type_scheme: TypeScheme<'tcx>,
- type_predicates: &ty::GenericPredicates<'tcx>,
- opt_self_ty: Option<Ty<'tcx>>,
- def: Def,
- span: Span,
- node_id: ast::NodeId) {
- debug!("instantiate_path(path={:?}, def={:?}, node_id={}, type_scheme={:?})",
- segments,
- def,
- node_id,
- type_scheme);
-
- // We need to extract the type parameters supplied by the user in
- // the path `path`. Due to the current setup, this is a bit of a
- // tricky-process; the problem is that resolve only tells us the
- // end-point of the path resolution, and not the intermediate steps.
- // Luckily, we can (at least for now) deduce the intermediate steps
- // just from the end-point.
- //
- // There are basically four cases to consider:
- //
- // 1. Reference to a *type*, such as a struct or enum:
- //
- // mod a { struct Foo<T> { ... } }
- //
- // Because we don't allow types to be declared within one
- // another, a path that leads to a type will always look like
- // `a::b::Foo<T>` where `a` and `b` are modules. This implies
- // that only the final segment can have type parameters, and
- // they are located in the TypeSpace.
- //
- // *Note:* Generally speaking, references to types don't
- // actually pass through this function, but rather the
- // `ast_ty_to_ty` function in `astconv`. However, in the case
- // of struct patterns (and maybe literals) we do invoke
- // `instantiate_path` to get the general type of an instance of
- // a struct. (In these cases, there are actually no type
- // parameters permitted at present, but perhaps we will allow
- // them in the future.)
- //
- // 1b. Reference to an enum variant or tuple-like struct:
- //
- // struct foo<T>(...)
- // enum E<T> { foo(...) }
- //
- // In these cases, the parameters are declared in the type
- // space.
- //
- // 2. Reference to a *fn item*:
- //
- // fn foo<T>() { }
- //
- // In this case, the path will again always have the form
- // `a::b::foo::<T>` where only the final segment should have
- // type parameters. However, in this case, those parameters are
- // declared on a value, and hence are in the `FnSpace`.
- //
- // 3. Reference to a *method*:
- //
- // impl<A> SomeStruct<A> {
- // fn foo<B>(...)
- // }
- //
- // Here we can have a path like
- // `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
- // may appear in two places. The penultimate segment,
- // `SomeStruct::<A>`, contains parameters in TypeSpace, and the
- // final segment, `foo::<B>` contains parameters in fn space.
- //
- // 4. Reference to an *associated const*:
- //
- // impl<A> AnotherStruct<A> {
- // const FOO: B = BAR;
- // }
- //
- // The path in this case will look like
- // `a::b::AnotherStruct::<A>::FOO`, so the penultimate segment
- // only will have parameters in TypeSpace.
- //
- // The first step then is to categorize the segments appropriately.
-
- assert!(!segments.is_empty());
-
- let mut ufcs_associated = None;
- let mut segment_spaces: Vec<_>;
- match def {
- // Case 1 and 1b. Reference to a *type* or *enum variant*.
- Def::SelfTy(..) |
- Def::Struct(..) |
- Def::Variant(..) |
- Def::Enum(..) |
- Def::TyAlias(..) |
- Def::AssociatedTy(..) |
- Def::Trait(..) |
- Def::PrimTy(..) |
- Def::TyParam(..) => {
- // Everything but the final segment should have no
- // parameters at all.
- segment_spaces = vec![None; segments.len() - 1];
- segment_spaces.push(Some(subst::TypeSpace));
- }
-
- // Case 2. Reference to a top-level value.
- Def::Fn(..) |
- Def::Const(..) |
- Def::Static(..) => {
- segment_spaces = vec![None; segments.len() - 1];
- segment_spaces.push(Some(subst::FnSpace));
- }
+ Def::AssociatedConst(def_id) => {
+ let container = self.tcx.impl_or_trait_item(def_id).container();
+ match container {
+ ty::TraitContainer(trait_did) => {
+ callee::check_legal_trait_for_method_call(self.ccx, span, trait_did)
+ }
+ ty::ImplContainer(_) => {}
+ }
- // Case 3. Reference to a method.
- Def::Method(def_id) => {
- let container = fcx.tcx().impl_or_trait_item(def_id).container();
- match container {
- ty::TraitContainer(trait_did) => {
- callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did)
+ if segments.len() >= 2 {
+ segment_spaces = vec![None; segments.len() - 2];
+ segment_spaces.push(Some(subst::TypeSpace));
+ segment_spaces.push(None);
+ } else {
+ // `<T>::CONST` will end up here, and so can `T::CONST`.
+ let self_ty = opt_self_ty.expect("UFCS sugared const missing Self");
+ segment_spaces = vec![None];
+ ufcs_associated = Some((container, self_ty));
}
- ty::ImplContainer(_) => {}
}
- if segments.len() >= 2 {
- segment_spaces = vec![None; segments.len() - 2];
- segment_spaces.push(Some(subst::TypeSpace));
- segment_spaces.push(Some(subst::FnSpace));
- } else {
- // `<T>::method` will end up here, and so can `T::method`.
- let self_ty = opt_self_ty.expect("UFCS sugared method missing Self");
- segment_spaces = vec![Some(subst::FnSpace)];
- ufcs_associated = Some((container, self_ty));
+ // Other cases. Various nonsense that really shouldn't show up
+ // here. If they do, an error will have been reported
+ // elsewhere. (I hope)
+ Def::Mod(..) |
+ Def::ForeignMod(..) |
+ Def::Local(..) |
+ Def::Label(..) |
+ Def::Upvar(..) => {
+ segment_spaces = vec![None; segments.len()];
}
- }
- Def::AssociatedConst(def_id) => {
- let container = fcx.tcx().impl_or_trait_item(def_id).container();
- match container {
- ty::TraitContainer(trait_did) => {
- callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did)
- }
- ty::ImplContainer(_) => {}
+ Def::Err => {
+ self.set_tainted_by_errors();
+ segment_spaces = vec![None; segments.len()];
}
-
- if segments.len() >= 2 {
- segment_spaces = vec![None; segments.len() - 2];
- segment_spaces.push(Some(subst::TypeSpace));
- segment_spaces.push(None);
+ }
+ assert_eq!(segment_spaces.len(), segments.len());
+
+ // In `<T as Trait<A, B>>::method`, `A` and `B` are mandatory, but
+ // `opt_self_ty` can also be Some for `Foo::method`, where Foo's
+ // type parameters are not mandatory.
+ let require_type_space = opt_self_ty.is_some() && ufcs_associated.is_none();
+
+ debug!("segment_spaces={:?}", segment_spaces);
+
+ // Next, examine the definition, and determine how many type
+ // parameters we expect from each space.
+ let type_defs = &type_scheme.generics.types;
+ let region_defs = &type_scheme.generics.regions;
+
+ // Now that we have categorized what space the parameters for each
+ // segment belong to, let's sort out the parameters that the user
+ // provided (if any) into their appropriate spaces. We'll also report
+ // errors if type parameters are provided in an inappropriate place.
+ let mut substs = Substs::empty();
+ for (&opt_space, segment) in segment_spaces.iter().zip(segments) {
+ if let Some(space) = opt_space {
+ self.push_explicit_parameters_from_segment_to_substs(space,
+ span,
+ type_defs,
+ region_defs,
+ segment,
+ &mut substs);
} else {
- // `<T>::CONST` will end up here, and so can `T::CONST`.
- let self_ty = opt_self_ty.expect("UFCS sugared const missing Self");
- segment_spaces = vec![None];
- ufcs_associated = Some((container, self_ty));
+ self.tcx.prohibit_type_params(slice::ref_slice(segment));
}
}
-
- // Other cases. Various nonsense that really shouldn't show up
- // here. If they do, an error will have been reported
- // elsewhere. (I hope)
- Def::Mod(..) |
- Def::ForeignMod(..) |
- Def::Local(..) |
- Def::Label(..) |
- Def::Upvar(..) => {
- segment_spaces = vec![None; segments.len()];
- }
-
- Def::Err => {
- fcx.infcx().set_tainted_by_errors();
- segment_spaces = vec![None; segments.len()];
+ if let Some(self_ty) = opt_self_ty {
+ if type_defs.len(subst::SelfSpace) == 1 {
+ substs.types.push(subst::SelfSpace, self_ty);
+ }
}
- }
- assert_eq!(segment_spaces.len(), segments.len());
- // In `<T as Trait<A, B>>::method`, `A` and `B` are mandatory, but
- // `opt_self_ty` can also be Some for `Foo::method`, where Foo's
- // type parameters are not mandatory.
- let require_type_space = opt_self_ty.is_some() && ufcs_associated.is_none();
+ // Now we have to compare the types that the user *actually*
+ // provided against the types that were *expected*. If the user
+ // did not provide any types, then we want to substitute inference
+ // variables. If the user provided some types, we may still need
+ // to add defaults. If the user provided *too many* types, that's
+ // a problem.
+ for &space in &[subst::SelfSpace, subst::TypeSpace, subst::FnSpace] {
+ self.adjust_type_parameters(span, space, type_defs,
+ require_type_space, &mut substs);
+ assert_eq!(substs.types.len(space), type_defs.len(space));
- debug!("segment_spaces={:?}", segment_spaces);
+ self.adjust_region_parameters(span, space, region_defs, &mut substs);
+ assert_eq!(substs.regions.len(space), region_defs.len(space));
+ }
- // Next, examine the definition, and determine how many type
- // parameters we expect from each space.
- let type_defs = &type_scheme.generics.types;
- let region_defs = &type_scheme.generics.regions;
+ // The things we are substituting into the type should not contain
+ // escaping late-bound regions, and nor should the base type scheme.
+ let substs = self.tcx.mk_substs(substs);
+ assert!(!substs.has_regions_escaping_depth(0));
+ assert!(!type_scheme.has_escaping_regions());
- // Now that we have categorized what space the parameters for each
- // segment belong to, let's sort out the parameters that the user
- // provided (if any) into their appropriate spaces. We'll also report
- // errors if type parameters are provided in an inappropriate place.
- let mut substs = Substs::empty();
- for (opt_space, segment) in segment_spaces.iter().zip(segments) {
- match *opt_space {
- None => {
- prohibit_type_params(fcx.tcx(), slice::ref_slice(segment));
- }
+        // Add all the obligations that are required, substituting and
+        // normalizing appropriately.
+ let bounds = self.instantiate_bounds(span, &substs, &type_predicates);
+ self.add_obligations_for_parameters(
+ traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def.def_id())),
+ &bounds);
- Some(space) => {
- push_explicit_parameters_from_segment_to_substs(fcx,
- space,
- span,
- type_defs,
- region_defs,
- segment,
- &mut substs);
+ // Substitute the values for the type parameters into the type of
+ // the referenced item.
+ let ty_substituted = self.instantiate_type_scheme(span, &substs, &type_scheme.ty);
+
+
+ if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated {
+ // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
+ // is inherent, there is no `Self` parameter, instead, the impl needs
+ // type parameters, which we can infer by unifying the provided `Self`
+ // with the substituted impl type.
+ let impl_scheme = self.tcx.lookup_item_type(impl_def_id);
+ assert_eq!(substs.types.len(subst::TypeSpace),
+ impl_scheme.generics.types.len(subst::TypeSpace));
+ assert_eq!(substs.regions.len(subst::TypeSpace),
+ impl_scheme.generics.regions.len(subst::TypeSpace));
+
+ let impl_ty = self.instantiate_type_scheme(span, &substs, &impl_scheme.ty);
+ match self.sub_types(false, TypeOrigin::Misc(span), self_ty, impl_ty) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ }
+ Err(_) => {
+ span_bug!(span,
+ "instantiate_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
+ self_ty,
+ impl_ty);
+ }
}
}
- }
- if let Some(self_ty) = opt_self_ty {
- if type_defs.len(subst::SelfSpace) == 1 {
- substs.types.push(subst::SelfSpace, self_ty);
- }
- }
- // Now we have to compare the types that the user *actually*
- // provided against the types that were *expected*. If the user
- // did not provide any types, then we want to substitute inference
- // variables. If the user provided some types, we may still need
- // to add defaults. If the user provided *too many* types, that's
- // a problem.
- for &space in &[subst::SelfSpace, subst::TypeSpace, subst::FnSpace] {
- adjust_type_parameters(fcx, span, space, type_defs,
- require_type_space, &mut substs);
- assert_eq!(substs.types.len(space), type_defs.len(space));
-
- adjust_region_parameters(fcx, span, space, region_defs, &mut substs);
- assert_eq!(substs.regions.len(space), region_defs.len(space));
- }
-
- // The things we are substituting into the type should not contain
- // escaping late-bound regions, and nor should the base type scheme.
- assert!(!substs.has_regions_escaping_depth(0));
- assert!(!type_scheme.has_escaping_regions());
-
- // Add all the obligations that are required, substituting and
- // normalized appropriately.
- let bounds = fcx.instantiate_bounds(span, &substs, &type_predicates);
- fcx.add_obligations_for_parameters(
- traits::ObligationCause::new(span, fcx.body_id, traits::ItemObligation(def.def_id())),
- &bounds);
-
- // Substitute the values for the type parameters into the type of
- // the referenced item.
- let ty_substituted = fcx.instantiate_type_scheme(span, &substs, &type_scheme.ty);
-
-
- if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated {
- // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
- // is inherent, there is no `Self` parameter, instead, the impl needs
- // type parameters, which we can infer by unifying the provided `Self`
- // with the substituted impl type.
- let impl_scheme = fcx.tcx().lookup_item_type(impl_def_id);
- assert_eq!(substs.types.len(subst::TypeSpace),
- impl_scheme.generics.types.len(subst::TypeSpace));
- assert_eq!(substs.regions.len(subst::TypeSpace),
- impl_scheme.generics.regions.len(subst::TypeSpace));
-
- let impl_ty = fcx.instantiate_type_scheme(span, &substs, &impl_scheme.ty);
- if fcx.mk_subty(false, TypeOrigin::Misc(span), self_ty, impl_ty).is_err() {
- span_bug!(span,
- "instantiate_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
- self_ty,
- impl_ty);
- }
+ debug!("instantiate_path: type of {:?} is {:?}",
+ node_id,
+ ty_substituted);
+ self.write_ty(node_id, ty_substituted);
+ self.write_substs(node_id, ty::ItemSubsts {
+ substs: substs
+ });
}
- debug!("instantiate_path: type of {:?} is {:?}",
- node_id,
- ty_substituted);
- fcx.write_ty(node_id, ty_substituted);
- fcx.write_substs(node_id, ty::ItemSubsts { substs: substs });
- return;
-
/// Finds the parameters that the user provided and adds them to `substs`. If too many
/// parameters are provided, then reports an error and clears the output vector.
///
/// Note that we *do not* check for *too few* parameters here. Due to the presence of defaults
/// etc that is more complicated. I wanted however to do the reporting of *too many* parameters
/// here because we can easily use the precise span of the N+1'th parameter.
- fn push_explicit_parameters_from_segment_to_substs<'a, 'tcx>(
- fcx: &FnCtxt<'a, 'tcx>,
+ fn push_explicit_parameters_from_segment_to_substs(&self,
space: subst::ParamSpace,
span: Span,
type_defs: &VecPerParamSpace<ty::TypeParameterDef<'tcx>>,
{
match segment.parameters {
hir::AngleBracketedParameters(ref data) => {
- push_explicit_angle_bracketed_parameters_from_segment_to_substs(
- fcx, space, type_defs, region_defs, data, substs);
+ self.push_explicit_angle_bracketed_parameters_from_segment_to_substs(
+ space, type_defs, region_defs, data, substs);
}
hir::ParenthesizedParameters(ref data) => {
- span_err!(fcx.tcx().sess, span, E0238,
+ span_err!(self.tcx.sess, span, E0238,
"parenthesized parameters may only be used with a trait");
- push_explicit_parenthesized_parameters_from_segment_to_substs(
- fcx, space, span, type_defs, data, substs);
+ self.push_explicit_parenthesized_parameters_from_segment_to_substs(
+ space, span, type_defs, data, substs);
}
}
}
- fn push_explicit_angle_bracketed_parameters_from_segment_to_substs<'a, 'tcx>(
- fcx: &FnCtxt<'a, 'tcx>,
+ fn push_explicit_angle_bracketed_parameters_from_segment_to_substs(&self,
space: subst::ParamSpace,
type_defs: &VecPerParamSpace<ty::TypeParameterDef<'tcx>>,
region_defs: &VecPerParamSpace<ty::RegionParameterDef>,
let type_count = type_defs.len(space);
assert_eq!(substs.types.len(space), 0);
for (i, typ) in data.types.iter().enumerate() {
- let t = fcx.to_ty(&typ);
+ let t = self.to_ty(&typ);
if i < type_count {
substs.types.push(space, t);
} else if i == type_count {
- span_err!(fcx.tcx().sess, typ.span, E0087,
+ span_err!(self.tcx.sess, typ.span, E0087,
"too many type parameters provided: \
expected at most {} parameter{}, \
found {} parameter{}",
}
if !data.bindings.is_empty() {
- span_err!(fcx.tcx().sess, data.bindings[0].span, E0182,
+ span_err!(self.tcx.sess, data.bindings[0].span, E0182,
"unexpected binding of associated item in expression path \
(only allowed in type paths)");
}
let region_count = region_defs.len(space);
assert_eq!(substs.regions.len(space), 0);
for (i, lifetime) in data.lifetimes.iter().enumerate() {
- let r = ast_region_to_region(fcx.tcx(), lifetime);
+ let r = ast_region_to_region(self.tcx, lifetime);
if i < region_count {
substs.regions.push(space, r);
} else if i == region_count {
- span_err!(fcx.tcx().sess, lifetime.span, E0088,
+ span_err!(self.tcx.sess, lifetime.span, E0088,
"too many lifetime parameters provided: \
expected {} parameter{}, found {} parameter{}",
region_count,
/// roughly the same thing as `Foo<(A,B),C>`. One important
/// difference has to do with the treatment of anonymous
/// regions, which are translated into bound regions (NYI).
- fn push_explicit_parenthesized_parameters_from_segment_to_substs<'a, 'tcx>(
- fcx: &FnCtxt<'a, 'tcx>,
+ fn push_explicit_parenthesized_parameters_from_segment_to_substs(&self,
space: subst::ParamSpace,
span: Span,
type_defs: &VecPerParamSpace<ty::TypeParameterDef<'tcx>>,
{
let type_count = type_defs.len(space);
if type_count < 2 {
- span_err!(fcx.tcx().sess, span, E0167,
+ span_err!(self.tcx.sess, span, E0167,
"parenthesized form always supplies 2 type parameters, \
but only {} parameter(s) were expected",
type_count);
}
let input_tys: Vec<Ty> =
- data.inputs.iter().map(|ty| fcx.to_ty(&ty)).collect();
+ data.inputs.iter().map(|ty| self.to_ty(&ty)).collect();
- let tuple_ty = fcx.tcx().mk_tup(input_tys);
+ let tuple_ty = self.tcx.mk_tup(input_tys);
if type_count >= 1 {
substs.types.push(space, tuple_ty);
}
let output_ty: Option<Ty> =
- data.output.as_ref().map(|ty| fcx.to_ty(&ty));
+ data.output.as_ref().map(|ty| self.to_ty(&ty));
let output_ty =
- output_ty.unwrap_or(fcx.tcx().mk_nil());
+ output_ty.unwrap_or(self.tcx.mk_nil());
if type_count >= 2 {
substs.types.push(space, output_ty);
}
}
- fn adjust_type_parameters<'a, 'tcx>(
- fcx: &FnCtxt<'a, 'tcx>,
+ fn adjust_type_parameters(&self,
span: Span,
space: ParamSpace,
defs: &VecPerParamSpace<ty::TypeParameterDef<'tcx>>,
// everything.
if provided_len == 0 && !(require_type_space && space == subst::TypeSpace) {
substs.types.replace(space, Vec::new());
- fcx.infcx().type_vars_for_defs(span, space, substs, &desired[..]);
+ self.type_vars_for_defs(span, space, substs, &desired[..]);
return;
}
if provided_len < required_len {
let qualifier =
if desired.len() != required_len { "at least " } else { "" };
- span_err!(fcx.tcx().sess, span, E0089,
+ span_err!(self.tcx.sess, span, E0089,
"too few type parameters provided: expected {}{} parameter{}, \
found {} parameter{}",
qualifier, required_len,
if required_len == 1 {""} else {"s"},
provided_len,
if provided_len == 1 {""} else {"s"});
- substs.types.replace(space, vec![fcx.tcx().types.err; desired.len()]);
+ substs.types.replace(space, vec![self.tcx.types.err; desired.len()]);
return;
}
// partial substitution that we have built up.
for i in provided_len..desired.len() {
let default = desired[i].default.unwrap();
- let default = default.subst_spanned(fcx.tcx(), substs, Some(span));
+ let default = default.subst_spanned(self.tcx, substs, Some(span));
substs.types.push(space, default);
}
assert_eq!(substs.types.len(space), desired.len());
debug!("Final substs: {:?}", substs);
}
- fn adjust_region_parameters(
- fcx: &FnCtxt,
+ fn adjust_region_parameters(&self,
span: Span,
space: ParamSpace,
defs: &VecPerParamSpace<ty::RegionParameterDef>,
if provided_len == 0 {
substs.regions.replace(
space,
- fcx.infcx().region_vars_for_defs(span, desired));
+ self.region_vars_for_defs(span, desired));
return;
}
// Otherwise, too few were provided. Report an error and then
// use inference variables.
- span_err!(fcx.tcx().sess, span, E0090,
+ span_err!(self.tcx.sess, span, E0090,
"too few lifetime parameters provided: expected {} parameter{}, \
found {} parameter{}",
desired.len(),
substs.regions.replace(
space,
- fcx.infcx().region_vars_for_defs(span, desired));
+ self.region_vars_for_defs(span, desired));
}
-}
-fn structurally_resolve_type_or_else<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- ty: Ty<'tcx>,
- f: F) -> Ty<'tcx>
- where F: Fn() -> Ty<'tcx>
-{
- let mut ty = fcx.resolve_type_vars_if_possible(ty);
+ fn structurally_resolve_type_or_else<F>(&self, sp: Span, ty: Ty<'tcx>, f: F)
+ -> Ty<'tcx>
+ where F: Fn() -> Ty<'tcx>
+ {
+ let mut ty = self.resolve_type_vars_with_obligations(ty);
- if ty.is_ty_var() {
- let alternative = f();
+ if ty.is_ty_var() {
+ let alternative = f();
- // If not, error.
- if alternative.is_ty_var() || alternative.references_error() {
- if !fcx.infcx().is_tainted_by_errors() {
- fcx.type_error_message(sp, |_actual| {
- "the type of this value must be known in this context".to_string()
- }, ty, None);
+ // If not, error.
+ if alternative.is_ty_var() || alternative.references_error() {
+ if !self.is_tainted_by_errors() {
+ self.type_error_message(sp, |_actual| {
+ "the type of this value must be known in this context".to_string()
+ }, ty, None);
+ }
+ self.demand_suptype(sp, self.tcx.types.err, ty);
+ ty = self.tcx.types.err;
+ } else {
+ self.demand_suptype(sp, alternative, ty);
+ ty = alternative;
}
- demand::suptype(fcx, sp, fcx.tcx().types.err, ty);
- ty = fcx.tcx().types.err;
- } else {
- demand::suptype(fcx, sp, alternative, ty);
- ty = alternative;
}
- }
- ty
-}
+ ty
+ }
-// Resolves `typ` by a single level if `typ` is a type variable. If no
-// resolution is possible, then an error is reported.
-pub fn structurally_resolved_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- sp: Span,
- ty: Ty<'tcx>)
- -> Ty<'tcx>
-{
- structurally_resolve_type_or_else(fcx, sp, ty, || {
- fcx.tcx().types.err
- })
+ // Resolves `typ` by a single level if `typ` is a type variable. If no
+ // resolution is possible, then an error is reported.
+ pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.structurally_resolve_type_or_else(sp, ty, || {
+ self.tcx.types.err
+ })
+ }
}
// Returns true if b contains a break that can exit from b
-pub fn may_break(cx: &TyCtxt, id: ast::NodeId, b: &hir::Block) -> bool {
+pub fn may_break(tcx: TyCtxt, id: ast::NodeId, b: &hir::Block) -> bool {
// First: is there an unlabeled break immediately
// inside the loop?
(loop_query(&b, |e| {
// <id> nested anywhere inside the loop?
(block_query(b, |e| {
if let hir::ExprBreak(Some(_)) = e.node {
- lookup_full_def(cx, e.span, e.id) == Def::Label(id)
+ lookup_full_def(tcx, e.span, e.id) == Def::Label(id)
} else {
false
}
//! Code related to processing overloaded binary and unary operators.
-use super::{
- check_expr,
- check_expr_coercable_to_type,
- check_expr_with_lvalue_pref,
- demand,
- method,
- FnCtxt,
-};
+use super::FnCtxt;
use hir::def_id::DefId;
use rustc::ty::{Ty, TypeFoldable, PreferMutLvalue};
use syntax::ast;
use syntax::parse::token;
use rustc::hir;
-/// Check a `a <op>= b`
-pub fn check_binop_assign<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- expr: &'tcx hir::Expr,
- op: hir::BinOp,
- lhs_expr: &'tcx hir::Expr,
- rhs_expr: &'tcx hir::Expr)
-{
- check_expr_with_lvalue_pref(fcx, lhs_expr, PreferMutLvalue);
-
- let lhs_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(lhs_expr));
- let (rhs_ty, return_ty) =
- check_overloaded_binop(fcx, expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::Yes);
- let rhs_ty = fcx.resolve_type_vars_if_possible(rhs_ty);
-
- if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
- enforce_builtin_binop_types(fcx, lhs_expr, lhs_ty, rhs_expr, rhs_ty, op);
- fcx.write_nil(expr.id);
- } else {
- fcx.write_ty(expr.id, return_ty);
- }
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ /// Check a `a <op>= b`
+ pub fn check_binop_assign(&self,
+ expr: &'gcx hir::Expr,
+ op: hir::BinOp,
+ lhs_expr: &'gcx hir::Expr,
+ rhs_expr: &'gcx hir::Expr)
+ {
+ self.check_expr_with_lvalue_pref(lhs_expr, PreferMutLvalue);
+
+ let lhs_ty = self.resolve_type_vars_with_obligations(self.expr_ty(lhs_expr));
+ let (rhs_ty, return_ty) =
+ self.check_overloaded_binop(expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::Yes);
+ let rhs_ty = self.resolve_type_vars_with_obligations(rhs_ty);
+
+ if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
+ self.enforce_builtin_binop_types(lhs_expr, lhs_ty, rhs_expr, rhs_ty, op);
+ self.write_nil(expr.id);
+ } else {
+ self.write_ty(expr.id, return_ty);
+ }
- let tcx = fcx.tcx();
- if !tcx.expr_is_lval(lhs_expr) {
- span_err!(tcx.sess, lhs_expr.span, E0067, "invalid left-hand side expression");
+ let tcx = self.tcx;
+ if !tcx.expr_is_lval(lhs_expr) {
+ span_err!(tcx.sess, lhs_expr.span, E0067, "invalid left-hand side expression");
+ }
}
-}
-
-/// Check a potentially overloaded binary operator.
-pub fn check_binop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- op: hir::BinOp,
- lhs_expr: &'tcx hir::Expr,
- rhs_expr: &'tcx hir::Expr)
-{
- let tcx = fcx.ccx.tcx;
- debug!("check_binop(expr.id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
- expr.id,
- expr,
- op,
- lhs_expr,
- rhs_expr);
-
- check_expr(fcx, lhs_expr);
- let lhs_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(lhs_expr));
-
- match BinOpCategory::from(op) {
- BinOpCategory::Shortcircuit => {
- // && and || are a simple case.
- demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty);
- check_expr_coercable_to_type(fcx, rhs_expr, tcx.mk_bool());
- fcx.write_ty(expr.id, tcx.mk_bool());
- }
- _ => {
- // Otherwise, we always treat operators as if they are
- // overloaded. This is the way to be most flexible w/r/t
- // types that get inferred.
- let (rhs_ty, return_ty) =
- check_overloaded_binop(fcx, expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::No);
-
- // Supply type inference hints if relevant. Probably these
- // hints should be enforced during select as part of the
- // `consider_unification_despite_ambiguity` routine, but this
- // more convenient for now.
- //
- // The basic idea is to help type inference by taking
- // advantage of things we know about how the impls for
- // scalar types are arranged. This is important in a
- // scenario like `1_u32 << 2`, because it lets us quickly
- // deduce that the result type should be `u32`, even
- // though we don't know yet what type 2 has and hence
- // can't pin this down to a specific impl.
- let rhs_ty = fcx.resolve_type_vars_if_possible(rhs_ty);
- if
- !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() &&
- is_builtin_binop(lhs_ty, rhs_ty, op)
- {
- let builtin_return_ty =
- enforce_builtin_binop_types(fcx, lhs_expr, lhs_ty, rhs_expr, rhs_ty, op);
- demand::suptype(fcx, expr.span, builtin_return_ty, return_ty);
+ /// Check a potentially overloaded binary operator.
+ pub fn check_binop(&self,
+ expr: &'gcx hir::Expr,
+ op: hir::BinOp,
+ lhs_expr: &'gcx hir::Expr,
+ rhs_expr: &'gcx hir::Expr)
+ {
+ let tcx = self.tcx;
+
+ debug!("check_binop(expr.id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
+ expr.id,
+ expr,
+ op,
+ lhs_expr,
+ rhs_expr);
+
+ self.check_expr(lhs_expr);
+ let lhs_ty = self.resolve_type_vars_with_obligations(self.expr_ty(lhs_expr));
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ // && and || are a simple case.
+ self.demand_suptype(lhs_expr.span, tcx.mk_bool(), lhs_ty);
+ self.check_expr_coercable_to_type(rhs_expr, tcx.mk_bool());
+ self.write_ty(expr.id, tcx.mk_bool());
}
+ _ => {
+ // Otherwise, we always treat operators as if they are
+ // overloaded. This is the way to be most flexible w/r/t
+ // types that get inferred.
+ let (rhs_ty, return_ty) =
+ self.check_overloaded_binop(expr, lhs_expr, lhs_ty,
+ rhs_expr, op, IsAssign::No);
+
+ // Supply type inference hints if relevant. Probably these
+ // hints should be enforced during select as part of the
+ // `consider_unification_despite_ambiguity` routine, but this
+            // is more convenient for now.
+ //
+ // The basic idea is to help type inference by taking
+ // advantage of things we know about how the impls for
+ // scalar types are arranged. This is important in a
+ // scenario like `1_u32 << 2`, because it lets us quickly
+ // deduce that the result type should be `u32`, even
+ // though we don't know yet what type 2 has and hence
+ // can't pin this down to a specific impl.
+ let rhs_ty = self.resolve_type_vars_with_obligations(rhs_ty);
+ if
+ !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() &&
+ is_builtin_binop(lhs_ty, rhs_ty, op)
+ {
+ let builtin_return_ty =
+ self.enforce_builtin_binop_types(lhs_expr, lhs_ty, rhs_expr, rhs_ty, op);
+ self.demand_suptype(expr.span, builtin_return_ty, return_ty);
+ }
- fcx.write_ty(expr.id, return_ty);
+ self.write_ty(expr.id, return_ty);
+ }
}
}
-}
-fn enforce_builtin_binop_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- lhs_expr: &'tcx hir::Expr,
- lhs_ty: Ty<'tcx>,
- rhs_expr: &'tcx hir::Expr,
- rhs_ty: Ty<'tcx>,
- op: hir::BinOp)
- -> Ty<'tcx>
-{
- debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
-
- let tcx = fcx.tcx();
- match BinOpCategory::from(op) {
- BinOpCategory::Shortcircuit => {
- demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty);
- demand::suptype(fcx, rhs_expr.span, tcx.mk_bool(), rhs_ty);
- tcx.mk_bool()
- }
+ fn enforce_builtin_binop_types(&self,
+ lhs_expr: &'gcx hir::Expr,
+ lhs_ty: Ty<'tcx>,
+ rhs_expr: &'gcx hir::Expr,
+ rhs_ty: Ty<'tcx>,
+ op: hir::BinOp)
+ -> Ty<'tcx>
+ {
+ debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
+
+ let tcx = self.tcx;
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ self.demand_suptype(lhs_expr.span, tcx.mk_bool(), lhs_ty);
+ self.demand_suptype(rhs_expr.span, tcx.mk_bool(), rhs_ty);
+ tcx.mk_bool()
+ }
- BinOpCategory::Shift => {
- // result type is same as LHS always
- lhs_ty
- }
+ BinOpCategory::Shift => {
+ // result type is same as LHS always
+ lhs_ty
+ }
- BinOpCategory::Math |
- BinOpCategory::Bitwise => {
- // both LHS and RHS and result will have the same type
- demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty);
- lhs_ty
- }
+ BinOpCategory::Math |
+ BinOpCategory::Bitwise => {
+ // both LHS and RHS and result will have the same type
+ self.demand_suptype(rhs_expr.span, lhs_ty, rhs_ty);
+ lhs_ty
+ }
- BinOpCategory::Comparison => {
- // both LHS and RHS and result will have the same type
- demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty);
- tcx.mk_bool()
+ BinOpCategory::Comparison => {
+ // both LHS and RHS and result will have the same type
+ self.demand_suptype(rhs_expr.span, lhs_ty, rhs_ty);
+ tcx.mk_bool()
+ }
}
}
-}
-fn check_overloaded_binop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- lhs_expr: &'tcx hir::Expr,
- lhs_ty: Ty<'tcx>,
- rhs_expr: &'tcx hir::Expr,
- op: hir::BinOp,
- is_assign: IsAssign)
- -> (Ty<'tcx>, Ty<'tcx>)
-{
- debug!("check_overloaded_binop(expr.id={}, lhs_ty={:?}, is_assign={:?})",
- expr.id,
- lhs_ty,
- is_assign);
-
- let (name, trait_def_id) = name_and_trait_def_id(fcx, op, is_assign);
-
- // NB: As we have not yet type-checked the RHS, we don't have the
- // type at hand. Make a variable to represent it. The whole reason
- // for this indirection is so that, below, we can check the expr
- // using this variable as the expected type, which sometimes lets
- // us do better coercions than we would be able to do otherwise,
- // particularly for things like `String + &String`.
- let rhs_ty_var = fcx.infcx().next_ty_var();
-
- let return_ty = match lookup_op_method(fcx, expr, lhs_ty, vec![rhs_ty_var],
- token::intern(name), trait_def_id,
- lhs_expr) {
- Ok(return_ty) => return_ty,
- Err(()) => {
- // error types are considered "builtin"
- if !lhs_ty.references_error() {
- if let IsAssign::Yes = is_assign {
- span_err!(fcx.tcx().sess, lhs_expr.span, E0368,
- "binary assignment operation `{}=` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty);
- } else {
- let mut err = struct_span_err!(fcx.tcx().sess, lhs_expr.span, E0369,
- "binary operation `{}` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty);
- let missing_trait = match op.node {
- hir::BiAdd => Some("std::ops::Add"),
- hir::BiSub => Some("std::ops::Sub"),
- hir::BiMul => Some("std::ops::Mul"),
- hir::BiDiv => Some("std::ops::Div"),
- hir::BiRem => Some("std::ops::Rem"),
- hir::BiBitAnd => Some("std::ops::BitAnd"),
- hir::BiBitOr => Some("std::ops::BitOr"),
- hir::BiShl => Some("std::ops::Shl"),
- hir::BiShr => Some("std::ops::Shr"),
- hir::BiEq | hir::BiNe => Some("std::cmp::PartialEq"),
- hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe =>
- Some("std::cmp::PartialOrd"),
- _ => None
- };
-
- if let Some(missing_trait) = missing_trait {
- span_note!(&mut err, lhs_expr.span,
- "an implementation of `{}` might be missing for `{}`",
- missing_trait, lhs_ty);
+ fn check_overloaded_binop(&self,
+ expr: &'gcx hir::Expr,
+ lhs_expr: &'gcx hir::Expr,
+ lhs_ty: Ty<'tcx>,
+ rhs_expr: &'gcx hir::Expr,
+ op: hir::BinOp,
+ is_assign: IsAssign)
+ -> (Ty<'tcx>, Ty<'tcx>)
+ {
+ debug!("check_overloaded_binop(expr.id={}, lhs_ty={:?}, is_assign={:?})",
+ expr.id,
+ lhs_ty,
+ is_assign);
+
+ let (name, trait_def_id) = self.name_and_trait_def_id(op, is_assign);
+
+ // NB: As we have not yet type-checked the RHS, we don't have the
+ // type at hand. Make a variable to represent it. The whole reason
+ // for this indirection is so that, below, we can check the expr
+ // using this variable as the expected type, which sometimes lets
+ // us do better coercions than we would be able to do otherwise,
+ // particularly for things like `String + &String`.
+ let rhs_ty_var = self.next_ty_var();
+
+ let return_ty = match self.lookup_op_method(expr, lhs_ty, vec![rhs_ty_var],
+ token::intern(name), trait_def_id,
+ lhs_expr) {
+ Ok(return_ty) => return_ty,
+ Err(()) => {
+ // error types are considered "builtin"
+ if !lhs_ty.references_error() {
+ if let IsAssign::Yes = is_assign {
+ span_err!(self.tcx.sess, lhs_expr.span, E0368,
+ "binary assignment operation `{}=` \
+ cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty);
+ } else {
+ let mut err = struct_span_err!(self.tcx.sess, lhs_expr.span, E0369,
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty);
+ let missing_trait = match op.node {
+ hir::BiAdd => Some("std::ops::Add"),
+ hir::BiSub => Some("std::ops::Sub"),
+ hir::BiMul => Some("std::ops::Mul"),
+ hir::BiDiv => Some("std::ops::Div"),
+ hir::BiRem => Some("std::ops::Rem"),
+ hir::BiBitAnd => Some("std::ops::BitAnd"),
+ hir::BiBitOr => Some("std::ops::BitOr"),
+ hir::BiShl => Some("std::ops::Shl"),
+ hir::BiShr => Some("std::ops::Shr"),
+ hir::BiEq | hir::BiNe => Some("std::cmp::PartialEq"),
+ hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe =>
+ Some("std::cmp::PartialOrd"),
+ _ => None
+ };
+
+ if let Some(missing_trait) = missing_trait {
+ span_note!(&mut err, lhs_expr.span,
+ "an implementation of `{}` might be missing for `{}`",
+ missing_trait, lhs_ty);
+ }
+ err.emit();
}
- err.emit();
}
+ self.tcx.types.err
}
- fcx.tcx().types.err
- }
- };
+ };
- // see `NB` above
- check_expr_coercable_to_type(fcx, rhs_expr, rhs_ty_var);
+ // see `NB` above
+ self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var);
- (rhs_ty_var, return_ty)
-}
+ (rhs_ty_var, return_ty)
+ }
-pub fn check_user_unop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- op_str: &str,
- mname: &str,
- trait_did: Option<DefId>,
- ex: &'tcx hir::Expr,
- operand_expr: &'tcx hir::Expr,
- operand_ty: Ty<'tcx>,
- op: hir::UnOp)
- -> Ty<'tcx>
-{
- assert!(op.is_by_value());
- match lookup_op_method(fcx, ex, operand_ty, vec![],
- token::intern(mname), trait_did,
- operand_expr) {
- Ok(t) => t,
- Err(()) => {
- fcx.type_error_message(ex.span, |actual| {
- format!("cannot apply unary operator `{}` to type `{}`",
- op_str, actual)
- }, operand_ty, None);
- fcx.tcx().types.err
+ pub fn check_user_unop(&self,
+ op_str: &str,
+ mname: &str,
+ trait_did: Option<DefId>,
+ ex: &'gcx hir::Expr,
+ operand_expr: &'gcx hir::Expr,
+ operand_ty: Ty<'tcx>,
+ op: hir::UnOp)
+ -> Ty<'tcx>
+ {
+ assert!(op.is_by_value());
+ match self.lookup_op_method(ex, operand_ty, vec![],
+ token::intern(mname), trait_did,
+ operand_expr) {
+ Ok(t) => t,
+ Err(()) => {
+ self.type_error_message(ex.span, |actual| {
+ format!("cannot apply unary operator `{}` to type `{}`",
+ op_str, actual)
+ }, operand_ty, None);
+ self.tcx.types.err
+ }
}
}
-}
-fn name_and_trait_def_id(fcx: &FnCtxt,
- op: hir::BinOp,
- is_assign: IsAssign)
- -> (&'static str, Option<DefId>) {
- let lang = &fcx.tcx().lang_items;
-
- if let IsAssign::Yes = is_assign {
- match op.node {
- hir::BiAdd => ("add_assign", lang.add_assign_trait()),
- hir::BiSub => ("sub_assign", lang.sub_assign_trait()),
- hir::BiMul => ("mul_assign", lang.mul_assign_trait()),
- hir::BiDiv => ("div_assign", lang.div_assign_trait()),
- hir::BiRem => ("rem_assign", lang.rem_assign_trait()),
- hir::BiBitXor => ("bitxor_assign", lang.bitxor_assign_trait()),
- hir::BiBitAnd => ("bitand_assign", lang.bitand_assign_trait()),
- hir::BiBitOr => ("bitor_assign", lang.bitor_assign_trait()),
- hir::BiShl => ("shl_assign", lang.shl_assign_trait()),
- hir::BiShr => ("shr_assign", lang.shr_assign_trait()),
- hir::BiLt | hir::BiLe | hir::BiGe | hir::BiGt | hir::BiEq | hir::BiNe | hir::BiAnd |
- hir::BiOr => {
- span_bug!(op.span,
- "impossible assignment operation: {}=",
- op.node.as_str())
+ fn name_and_trait_def_id(&self,
+ op: hir::BinOp,
+ is_assign: IsAssign)
+ -> (&'static str, Option<DefId>) {
+ let lang = &self.tcx.lang_items;
+
+ if let IsAssign::Yes = is_assign {
+ match op.node {
+ hir::BiAdd => ("add_assign", lang.add_assign_trait()),
+ hir::BiSub => ("sub_assign", lang.sub_assign_trait()),
+ hir::BiMul => ("mul_assign", lang.mul_assign_trait()),
+ hir::BiDiv => ("div_assign", lang.div_assign_trait()),
+ hir::BiRem => ("rem_assign", lang.rem_assign_trait()),
+ hir::BiBitXor => ("bitxor_assign", lang.bitxor_assign_trait()),
+ hir::BiBitAnd => ("bitand_assign", lang.bitand_assign_trait()),
+ hir::BiBitOr => ("bitor_assign", lang.bitor_assign_trait()),
+ hir::BiShl => ("shl_assign", lang.shl_assign_trait()),
+ hir::BiShr => ("shr_assign", lang.shr_assign_trait()),
+ hir::BiLt | hir::BiLe |
+ hir::BiGe | hir::BiGt |
+ hir::BiEq | hir::BiNe |
+ hir::BiAnd | hir::BiOr => {
+ span_bug!(op.span,
+ "impossible assignment operation: {}=",
+ op.node.as_str())
+ }
}
- }
- } else {
- match op.node {
- hir::BiAdd => ("add", lang.add_trait()),
- hir::BiSub => ("sub", lang.sub_trait()),
- hir::BiMul => ("mul", lang.mul_trait()),
- hir::BiDiv => ("div", lang.div_trait()),
- hir::BiRem => ("rem", lang.rem_trait()),
- hir::BiBitXor => ("bitxor", lang.bitxor_trait()),
- hir::BiBitAnd => ("bitand", lang.bitand_trait()),
- hir::BiBitOr => ("bitor", lang.bitor_trait()),
- hir::BiShl => ("shl", lang.shl_trait()),
- hir::BiShr => ("shr", lang.shr_trait()),
- hir::BiLt => ("lt", lang.ord_trait()),
- hir::BiLe => ("le", lang.ord_trait()),
- hir::BiGe => ("ge", lang.ord_trait()),
- hir::BiGt => ("gt", lang.ord_trait()),
- hir::BiEq => ("eq", lang.eq_trait()),
- hir::BiNe => ("ne", lang.eq_trait()),
- hir::BiAnd | hir::BiOr => {
- span_bug!(op.span, "&& and || are not overloadable")
+ } else {
+ match op.node {
+ hir::BiAdd => ("add", lang.add_trait()),
+ hir::BiSub => ("sub", lang.sub_trait()),
+ hir::BiMul => ("mul", lang.mul_trait()),
+ hir::BiDiv => ("div", lang.div_trait()),
+ hir::BiRem => ("rem", lang.rem_trait()),
+ hir::BiBitXor => ("bitxor", lang.bitxor_trait()),
+ hir::BiBitAnd => ("bitand", lang.bitand_trait()),
+ hir::BiBitOr => ("bitor", lang.bitor_trait()),
+ hir::BiShl => ("shl", lang.shl_trait()),
+ hir::BiShr => ("shr", lang.shr_trait()),
+ hir::BiLt => ("lt", lang.ord_trait()),
+ hir::BiLe => ("le", lang.ord_trait()),
+ hir::BiGe => ("ge", lang.ord_trait()),
+ hir::BiGt => ("gt", lang.ord_trait()),
+ hir::BiEq => ("eq", lang.eq_trait()),
+ hir::BiNe => ("ne", lang.eq_trait()),
+ hir::BiAnd | hir::BiOr => {
+ span_bug!(op.span, "&& and || are not overloadable")
+ }
}
}
}
-}
-fn lookup_op_method<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
- expr: &'tcx hir::Expr,
- lhs_ty: Ty<'tcx>,
- other_tys: Vec<Ty<'tcx>>,
- opname: ast::Name,
- trait_did: Option<DefId>,
- lhs_expr: &'a hir::Expr)
- -> Result<Ty<'tcx>,()>
-{
- debug!("lookup_op_method(expr={:?}, lhs_ty={:?}, opname={:?}, trait_did={:?}, lhs_expr={:?})",
- expr,
- lhs_ty,
- opname,
- trait_did,
- lhs_expr);
-
- let method = match trait_did {
- Some(trait_did) => {
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(lhs_expr),
- opname,
- trait_did,
- 0,
- false,
- lhs_ty,
- Some(other_tys))
- }
- None => None
- };
+ fn lookup_op_method(&self,
+ expr: &'gcx hir::Expr,
+ lhs_ty: Ty<'tcx>,
+ other_tys: Vec<Ty<'tcx>>,
+ opname: ast::Name,
+ trait_did: Option<DefId>,
+ lhs_expr: &'a hir::Expr)
+ -> Result<Ty<'tcx>,()>
+ {
+ debug!("lookup_op_method(expr={:?}, lhs_ty={:?}, opname={:?}, \
+ trait_did={:?}, lhs_expr={:?})",
+ expr,
+ lhs_ty,
+ opname,
+ trait_did,
+ lhs_expr);
+
+ let method = match trait_did {
+ Some(trait_did) => {
+ self.lookup_method_in_trait_adjusted(expr.span,
+ Some(lhs_expr),
+ opname,
+ trait_did,
+ 0,
+ false,
+ lhs_ty,
+ Some(other_tys))
+ }
+ None => None
+ };
- match method {
- Some(method) => {
- let method_ty = method.ty;
+ match method {
+ Some(method) => {
+ let method_ty = method.ty;
- // HACK(eddyb) Fully qualified path to work around a resolve bug.
- let method_call = ::rustc::ty::MethodCall::expr(expr.id);
- fcx.inh.tables.borrow_mut().method_map.insert(method_call, method);
+ // HACK(eddyb) Fully qualified path to work around a resolve bug.
+ let method_call = ::rustc::ty::MethodCall::expr(expr.id);
+ self.tables.borrow_mut().method_map.insert(method_call, method);
- // extract return type for method; all late bound regions
- // should have been instantiated by now
- let ret_ty = method_ty.fn_ret();
- Ok(fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap())
- }
- None => {
- Err(())
+ // extract return type for method; all late bound regions
+ // should have been instantiated by now
+ let ret_ty = method_ty.fn_ret();
+ Ok(self.tcx.no_late_bound_regions(&ret_ty).unwrap().unwrap())
+ }
+ None => {
+ Err(())
+ }
}
}
}
/// Reason #2 is the killer. I tried for a while to always use
/// overloaded logic and just check the types in constants/trans after
/// the fact, and it worked fine, except for SIMD types. -nmatsakis
-fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>,
- rhs: Ty<'tcx>,
- op: hir::BinOp)
- -> bool
-{
+fn is_builtin_binop(lhs: Ty, rhs: Ty, op: hir::BinOp) -> bool {
match BinOpCategory::from(op) {
BinOpCategory::Shortcircuit => {
true
use middle::region::{self, CodeExtent};
use rustc::ty::subst::Substs;
use rustc::traits;
-use rustc::ty::{self, Ty, TyCtxt, MethodCall, TypeFoldable};
-use rustc::infer::{self, GenericKind, InferCtxt, InferOk, SubregionOrigin, TypeOrigin, VerifyBound};
+use rustc::ty::{self, Ty, MethodCall, TypeFoldable};
+use rustc::infer::{self, GenericKind, InferOk, SubregionOrigin, TypeOrigin, VerifyBound};
use hir::pat_util;
use rustc::ty::adjustment;
use rustc::ty::wf::ImpliedBound;
use std::mem;
+use std::ops::Deref;
use syntax::ast;
use syntax::codemap::Span;
use rustc::hir::intravisit::{self, Visitor};
///////////////////////////////////////////////////////////////////////////
// PUBLIC ENTRY POINTS
-pub fn regionck_expr(fcx: &FnCtxt, e: &hir::Expr) {
- let mut rcx = Rcx::new(fcx, RepeatingScope(e.id), e.id, Subject(e.id));
- if fcx.err_count_since_creation() == 0 {
- // regionck assumes typeck succeeded
- rcx.visit_expr(e);
- rcx.visit_region_obligations(e.id);
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn regionck_expr(&self, e: &hir::Expr) {
+ let mut rcx = RegionCtxt::new(self, RepeatingScope(e.id), e.id, Subject(e.id));
+ if self.err_count_since_creation() == 0 {
+ // regionck assumes typeck succeeded
+ rcx.visit_expr(e);
+ rcx.visit_region_obligations(e.id);
+ }
+ rcx.resolve_regions_and_report_errors();
}
- rcx.resolve_regions_and_report_errors();
-}
-
-/// Region checking during the WF phase for items. `wf_tys` are the
-/// types from which we should derive implied bounds, if any.
-pub fn regionck_item<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
- item_id: ast::NodeId,
- span: Span,
- wf_tys: &[Ty<'tcx>]) {
- debug!("regionck_item(item.id={:?}, wf_tys={:?}", item_id, wf_tys);
- let mut rcx = Rcx::new(fcx, RepeatingScope(item_id), item_id, Subject(item_id));
- let tcx = fcx.tcx();
- rcx.free_region_map
- .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
- rcx.relate_free_regions(wf_tys, item_id, span);
- rcx.visit_region_obligations(item_id);
- rcx.resolve_regions_and_report_errors();
-}
-pub fn regionck_fn(fcx: &FnCtxt,
- fn_id: ast::NodeId,
- fn_span: Span,
- decl: &hir::FnDecl,
- blk: &hir::Block) {
- debug!("regionck_fn(id={})", fn_id);
- let mut rcx = Rcx::new(fcx, RepeatingScope(blk.id), blk.id, Subject(fn_id));
-
- if fcx.err_count_since_creation() == 0 {
- // regionck assumes typeck succeeded
- rcx.visit_fn_body(fn_id, decl, blk, fn_span);
+ /// Region checking during the WF phase for items. `wf_tys` are the
+ /// types from which we should derive implied bounds, if any.
+ pub fn regionck_item(&self,
+ item_id: ast::NodeId,
+ span: Span,
+ wf_tys: &[Ty<'tcx>]) {
+ debug!("regionck_item(item.id={:?}, wf_tys={:?}", item_id, wf_tys);
+ let mut rcx = RegionCtxt::new(self, RepeatingScope(item_id), item_id, Subject(item_id));
+ rcx.free_region_map.relate_free_regions_from_predicates(
+ &self.parameter_environment.caller_bounds);
+ rcx.relate_free_regions(wf_tys, item_id, span);
+ rcx.visit_region_obligations(item_id);
+ rcx.resolve_regions_and_report_errors();
}
- let tcx = fcx.tcx();
- rcx.free_region_map
- .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds);
+ pub fn regionck_fn(&self,
+ fn_id: ast::NodeId,
+ fn_span: Span,
+ decl: &hir::FnDecl,
+ blk: &hir::Block) {
+ debug!("regionck_fn(id={})", fn_id);
+ let mut rcx = RegionCtxt::new(self, RepeatingScope(blk.id), blk.id, Subject(fn_id));
+
+ if self.err_count_since_creation() == 0 {
+ // regionck assumes typeck succeeded
+ rcx.visit_fn_body(fn_id, decl, blk, fn_span);
+ }
+
+ rcx.free_region_map.relate_free_regions_from_predicates(
+ &self.parameter_environment.caller_bounds);
- rcx.resolve_regions_and_report_errors();
+ rcx.resolve_regions_and_report_errors();
- // For the top-level fn, store the free-region-map. We don't store
- // any map for closures; they just share the same map as the
- // function that created them.
- fcx.tcx().store_free_region_map(fn_id, rcx.free_region_map);
+ // For the top-level fn, store the free-region-map. We don't store
+ // any map for closures; they just share the same map as the
+ // function that created them.
+ self.tcx.store_free_region_map(fn_id, rcx.free_region_map);
+ }
}
///////////////////////////////////////////////////////////////////////////
// INTERNALS
-pub struct Rcx<'a, 'tcx: 'a> {
- pub fcx: &'a FnCtxt<'a, 'tcx>,
+pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>,
}
+impl<'a, 'gcx, 'tcx> Deref for RegionCtxt<'a, 'gcx, 'tcx> {
+ type Target = FnCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.fcx
+ }
+}
+
pub struct RepeatingScope(ast::NodeId);
pub enum SubjectNode { Subject(ast::NodeId), None }
-impl<'a, 'tcx> Rcx<'a, 'tcx> {
- pub fn new(fcx: &'a FnCtxt<'a, 'tcx>,
+impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> {
+ pub fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
initial_repeating_scope: RepeatingScope,
initial_body_id: ast::NodeId,
- subject: SubjectNode) -> Rcx<'a, 'tcx> {
+ subject: SubjectNode) -> RegionCtxt<'a, 'gcx, 'tcx> {
let RepeatingScope(initial_repeating_scope) = initial_repeating_scope;
- Rcx { fcx: fcx,
- repeating_scope: initial_repeating_scope,
- body_id: initial_body_id,
- call_site_scope: None,
- subject: subject,
- region_bound_pairs: Vec::new(),
- free_region_map: FreeRegionMap::new(),
+ RegionCtxt {
+ fcx: fcx,
+ repeating_scope: initial_repeating_scope,
+ body_id: initial_body_id,
+ call_site_scope: None,
+ subject: subject,
+ region_bound_pairs: Vec::new(),
+ free_region_map: FreeRegionMap::new(),
}
}
- pub fn tcx(&self) -> &'a TyCtxt<'tcx> {
- self.fcx.ccx.tcx
- }
-
- pub fn infcx(&self) -> &InferCtxt<'a,'tcx> {
- self.fcx.infcx()
- }
-
fn set_call_site_scope(&mut self, call_site_scope: Option<CodeExtent>) -> Option<CodeExtent> {
mem::replace(&mut self.call_site_scope, call_site_scope)
}
/// of b will be `&<R0>.i32` and then `*b` will require that `<R0>` be bigger than the let and
/// the `*b` expression, so we will effectively resolve `<R0>` to be the block B.
pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> {
- self.fcx.infcx().resolve_type_vars_if_possible(&unresolved_ty)
+ self.resolve_type_vars_if_possible(&unresolved_ty)
}
/// Try to resolve the type for the given node.
fn resolve_node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
- let t = self.fcx.node_ty(id);
+ let t = self.node_ty(id);
self.resolve_type(t)
}
fn resolve_method_type(&self, method_call: MethodCall) -> Option<Ty<'tcx>> {
- let method_ty = self.fcx.inh.tables.borrow().method_map
+ let method_ty = self.tables.borrow().method_map
.get(&method_call).map(|method| method.ty);
method_ty.map(|method_ty| self.resolve_type(method_ty))
}
ty_unadjusted
} else {
ty_unadjusted.adjust(
- self.fcx.tcx(), expr.span, expr.id,
- self.fcx.inh.tables.borrow().adjustments.get(&expr.id),
+ self.tcx, expr.span, expr.id,
+ self.tables.borrow().adjustments.get(&expr.id),
|method_call| self.resolve_method_type(method_call))
}
}
// When we enter a function, we can derive
debug!("visit_fn_body(id={})", id);
- let call_site = self.fcx.tcx().region_maps.lookup_code_extent(
+ let call_site = self.tcx.region_maps.lookup_code_extent(
region::CodeExtentData::CallSiteScope { fn_id: id, body_id: body.id });
let old_call_site_scope = self.set_call_site_scope(Some(call_site));
let fn_sig = {
- let fn_sig_map = &self.infcx().tables.borrow().liberated_fn_sigs;
+ let fn_sig_map = &self.tables.borrow().liberated_fn_sigs;
match fn_sig_map.get(&id) {
Some(f) => f.clone(),
None => {
let fn_sig_tys: Vec<_> =
fn_sig.inputs.iter()
.cloned()
- .chain(Some(fn_sig.output.unwrap_or(self.tcx().types.bool)))
+ .chain(Some(fn_sig.output.unwrap_or(self.tcx.types.bool)))
.collect();
let old_body_id = self.set_body_id(body.id);
self.relate_free_regions(&fn_sig_tys[..], body.id, span);
- link_fn_args(self,
- self.tcx().region_maps.node_extent(body.id),
- &fn_decl.inputs[..]);
+ self.link_fn_args(self.tcx.region_maps.node_extent(body.id),
+ &fn_decl.inputs[..]);
self.visit_block(body);
self.visit_region_obligations(body.id);
let call_site_scope = self.call_site_scope.unwrap();
debug!("visit_fn_body body.id {} call_site_scope: {:?}",
body.id, call_site_scope);
- type_of_node_must_outlive(self,
- infer::CallReturn(span),
- body.id,
- ty::ReScope(call_site_scope));
+ self.type_of_node_must_outlive(infer::CallReturn(span),
+ body.id,
+ ty::ReScope(call_site_scope));
self.region_bound_pairs.truncate(old_region_bounds_pairs_len);
// region checking can introduce new pending obligations
// which, when processed, might generate new region
// obligations. So make sure we process those.
- self.fcx.select_all_obligations_or_error();
+ self.select_all_obligations_or_error();
// Make a copy of the region obligations vec because we'll need
// to be able to borrow the fulfillment-cx below when projecting.
let region_obligations =
- self.fcx
- .inh
- .fulfillment_cx
+ self.fulfillment_cx
.borrow()
.region_obligations(node_id)
.to_vec();
r_o, r_o.cause);
let sup_type = self.resolve_type(r_o.sup_type);
let origin = self.code_to_origin(r_o.cause.span, sup_type, &r_o.cause.code);
- type_must_outlive(self, origin, sup_type, r_o.sub_region);
+ self.type_must_outlive(origin, sup_type, r_o.sub_region);
}
// Processing the region obligations should not cause the list to grow further:
assert_eq!(region_obligations.len(),
- self.fcx.inh.fulfillment_cx.borrow().region_obligations(node_id).len());
+ self.fulfillment_cx.borrow().region_obligations(node_id).len());
}
fn code_to_origin(&self,
for &ty in fn_sig_tys {
let ty = self.resolve_type(ty);
debug!("relate_free_regions(t={:?})", ty);
- let implied_bounds = ty::wf::implied_bounds(self.fcx.infcx(), body_id, ty, span);
+ let implied_bounds = ty::wf::implied_bounds(self, body_id, ty, span);
// Record any relations between free regions that we observe into the free-region-map.
self.free_region_map.relate_free_regions_from_implied_bounds(&implied_bounds);
match implication {
ImpliedBound::RegionSubRegion(ty::ReFree(free_a),
ty::ReVar(vid_b)) => {
- self.fcx.inh.infcx.add_given(free_a, vid_b);
+ self.add_given(free_a, vid_b);
}
ImpliedBound::RegionSubParam(r_a, param_b) => {
self.region_bound_pairs.push((r_a, GenericKind::Param(param_b)));
}
};
- self.fcx.infcx().resolve_regions_and_report_errors(&self.free_region_map,
- subject_node_id);
+ self.fcx.resolve_regions_and_report_errors(&self.free_region_map,
+ subject_node_id);
+ }
+
+ fn constrain_bindings_in_pat(&mut self, pat: &hir::Pat) {
+ let tcx = self.tcx;
+ debug!("regionck::visit_pat(pat={:?})", pat);
+ pat_util::pat_bindings(&tcx.def_map, pat, |_, id, span, _| {
+ // If we have a variable that contains region'd data, that
+ // data will be accessible from anywhere that the variable is
+ // accessed. We must be wary of loops like this:
+ //
+ // // from src/test/compile-fail/borrowck-lend-flow.rs
+ // let mut v = box 3, w = box 4;
+ // let mut x = &mut w;
+ // loop {
+ // **x += 1; // (2)
+ // borrow(v); //~ ERROR cannot borrow
+ // x = &mut v; // (1)
+ // }
+ //
+ // Typically, we try to determine the region of a borrow from
+ // those points where it is dereferenced. In this case, one
+ // might imagine that the lifetime of `x` need only be the
+ // body of the loop. But of course this is incorrect because
+ // the pointer that is created at point (1) is consumed at
+ // point (2), meaning that it must be live across the loop
+ // iteration. The easiest way to guarantee this is to require
+ // that the lifetime of any regions that appear in a
+ // variable's type enclose at least the variable's scope.
+
+ let var_scope = tcx.region_maps.var_scope(id);
+
+ let origin = infer::BindingTypeIsNotValidAtDecl(span);
+ self.type_of_node_must_outlive(origin, id, ty::ReScope(var_scope));
+
+ let typ = self.resolve_node_type(id);
+ dropck::check_safety_of_destructor_if_necessary(self, typ, span, var_scope);
+ })
}
}
-impl<'a, 'tcx, 'v> Visitor<'v> for Rcx<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for RegionCtxt<'a, 'gcx, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
// regions are not properly related if they appear within the
self.visit_fn_body(id, fd, b, span)
}
- fn visit_expr(&mut self, ex: &hir::Expr) { visit_expr(self, ex); }
-
//visit_pat: visit_pat, // (..) see above
- fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); }
-
- fn visit_local(&mut self, l: &hir::Local) { visit_local(self, l); }
-
- fn visit_block(&mut self, b: &hir::Block) { visit_block(self, b); }
-}
-
-fn visit_block(rcx: &mut Rcx, b: &hir::Block) {
- intravisit::walk_block(rcx, b);
-}
-
-fn visit_arm(rcx: &mut Rcx, arm: &hir::Arm) {
- // see above
- for p in &arm.pats {
- constrain_bindings_in_pat(&p, rcx);
+ fn visit_arm(&mut self, arm: &hir::Arm) {
+ // see above
+ for p in &arm.pats {
+ self.constrain_bindings_in_pat(p);
+ }
+ intravisit::walk_arm(self, arm);
}
- intravisit::walk_arm(rcx, arm);
-}
-
-fn visit_local(rcx: &mut Rcx, l: &hir::Local) {
- // see above
- constrain_bindings_in_pat(&l.pat, rcx);
- link_local(rcx, l);
- intravisit::walk_local(rcx, l);
-}
-
-fn constrain_bindings_in_pat(pat: &hir::Pat, rcx: &mut Rcx) {
- let tcx = rcx.fcx.tcx();
- debug!("regionck::visit_pat(pat={:?})", pat);
- pat_util::pat_bindings(&tcx.def_map, pat, |_, id, span, _| {
- // If we have a variable that contains region'd data, that
- // data will be accessible from anywhere that the variable is
- // accessed. We must be wary of loops like this:
- //
- // // from src/test/compile-fail/borrowck-lend-flow.rs
- // let mut v = box 3, w = box 4;
- // let mut x = &mut w;
- // loop {
- // **x += 1; // (2)
- // borrow(v); //~ ERROR cannot borrow
- // x = &mut v; // (1)
- // }
- //
- // Typically, we try to determine the region of a borrow from
- // those points where it is dereferenced. In this case, one
- // might imagine that the lifetime of `x` need only be the
- // body of the loop. But of course this is incorrect because
- // the pointer that is created at point (1) is consumed at
- // point (2), meaning that it must be live across the loop
- // iteration. The easiest way to guarantee this is to require
- // that the lifetime of any regions that appear in a
- // variable's type enclose at least the variable's scope.
-
- let var_scope = tcx.region_maps.var_scope(id);
-
- let origin = infer::BindingTypeIsNotValidAtDecl(span);
- type_of_node_must_outlive(rcx, origin, id, ty::ReScope(var_scope));
-
- let typ = rcx.resolve_node_type(id);
- dropck::check_safety_of_destructor_if_necessary(rcx, typ, span, var_scope);
- })
-}
+ fn visit_local(&mut self, l: &hir::Local) {
+ // see above
+ self.constrain_bindings_in_pat(&l.pat);
+ self.link_local(l);
+ intravisit::walk_local(self, l);
+ }
-fn visit_expr(rcx: &mut Rcx, expr: &hir::Expr) {
- debug!("regionck::visit_expr(e={:?}, repeating_scope={})",
- expr, rcx.repeating_scope);
-
- // No matter what, the type of each expression must outlive the
- // scope of that expression. This also guarantees basic WF.
- let expr_ty = rcx.resolve_node_type(expr.id);
- // the region corresponding to this expression
- let expr_region = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id));
- type_must_outlive(rcx, infer::ExprTypeIsNotInScope(expr_ty, expr.span),
- expr_ty, expr_region);
-
- let method_call = MethodCall::expr(expr.id);
- let opt_method_callee = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).cloned();
- let has_method_map = opt_method_callee.is_some();
-
- // If we are calling a method (either explicitly or via an
- // overloaded operator), check that all of the types provided as
- // arguments for its type parameters are well-formed, and all the regions
- // provided as arguments outlive the call.
- if let Some(callee) = opt_method_callee {
- let origin = match expr.node {
- hir::ExprMethodCall(..) =>
- infer::ParameterOrigin::MethodCall,
- hir::ExprUnary(op, _) if op == hir::UnDeref =>
- infer::ParameterOrigin::OverloadedDeref,
- _ =>
- infer::ParameterOrigin::OverloadedOperator
- };
+ fn visit_expr(&mut self, expr: &hir::Expr) {
+ debug!("regionck::visit_expr(e={:?}, repeating_scope={})",
+ expr, self.repeating_scope);
+
+ // No matter what, the type of each expression must outlive the
+ // scope of that expression. This also guarantees basic WF.
+ let expr_ty = self.resolve_node_type(expr.id);
+ // the region corresponding to this expression
+ let expr_region = ty::ReScope(self.tcx.region_maps.node_extent(expr.id));
+ self.type_must_outlive(infer::ExprTypeIsNotInScope(expr_ty, expr.span),
+ expr_ty, expr_region);
+
+ let method_call = MethodCall::expr(expr.id);
+ let opt_method_callee = self.tables.borrow().method_map.get(&method_call).cloned();
+ let has_method_map = opt_method_callee.is_some();
+
+ // If we are calling a method (either explicitly or via an
+ // overloaded operator), check that all of the types provided as
+ // arguments for its type parameters are well-formed, and all the regions
+ // provided as arguments outlive the call.
+ if let Some(callee) = opt_method_callee {
+ let origin = match expr.node {
+ hir::ExprMethodCall(..) =>
+ infer::ParameterOrigin::MethodCall,
+ hir::ExprUnary(op, _) if op == hir::UnDeref =>
+ infer::ParameterOrigin::OverloadedDeref,
+ _ =>
+ infer::ParameterOrigin::OverloadedOperator
+ };
- substs_wf_in_scope(rcx, origin, &callee.substs, expr.span, expr_region);
- type_must_outlive(rcx, infer::ExprTypeIsNotInScope(callee.ty, expr.span),
- callee.ty, expr_region);
- }
+ self.substs_wf_in_scope(origin, &callee.substs, expr.span, expr_region);
+ self.type_must_outlive(infer::ExprTypeIsNotInScope(callee.ty, expr.span),
+ callee.ty, expr_region);
+ }
- // Check any autoderefs or autorefs that appear.
- let adjustment = rcx.fcx.inh.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone());
- if let Some(adjustment) = adjustment {
- debug!("adjustment={:?}", adjustment);
- match adjustment {
- adjustment::AdjustDerefRef(adjustment::AutoDerefRef {
- autoderefs, ref autoref, ..
- }) => {
- let expr_ty = rcx.resolve_node_type(expr.id);
- constrain_autoderefs(rcx, expr, autoderefs, expr_ty);
- if let Some(ref autoref) = *autoref {
- link_autoref(rcx, expr, autoderefs, autoref);
-
- // Require that the resulting region encompasses
- // the current node.
- //
- // FIXME(#6268) remove to support nested method calls
- type_of_node_must_outlive(
- rcx, infer::AutoBorrow(expr.span),
- expr.id, expr_region);
+ // Check any autoderefs or autorefs that appear.
+ let adjustment = self.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone());
+ if let Some(adjustment) = adjustment {
+ debug!("adjustment={:?}", adjustment);
+ match adjustment {
+ adjustment::AdjustDerefRef(adjustment::AutoDerefRef {
+ autoderefs, ref autoref, ..
+ }) => {
+ let expr_ty = self.resolve_node_type(expr.id);
+ self.constrain_autoderefs(expr, autoderefs, expr_ty);
+ if let Some(ref autoref) = *autoref {
+ self.link_autoref(expr, autoderefs, autoref);
+
+ // Require that the resulting region encompasses
+ // the current node.
+ //
+ // FIXME(#6268) remove to support nested method calls
+ self.type_of_node_must_outlive(infer::AutoBorrow(expr.span),
+ expr.id, expr_region);
+ }
+ }
+ /*
+ adjustment::AutoObject(_, ref bounds, _, _) => {
+ // Determine if we are casting `expr` to a trait
+ // instance. If so, we have to be sure that the type
+ // of the source obeys the new region bound.
+ let source_ty = self.resolve_node_type(expr.id);
+ self.type_must_outlive(infer::RelateObjectBound(expr.span),
+ source_ty, bounds.region_bound);
}
+ */
+ _ => {}
}
- /*
- adjustment::AutoObject(_, ref bounds, _, _) => {
- // Determine if we are casting `expr` to a trait
- // instance. If so, we have to be sure that the type
- // of the source obeys the new region bound.
- let source_ty = rcx.resolve_node_type(expr.id);
- type_must_outlive(rcx, infer::RelateObjectBound(expr.span),
- source_ty, bounds.region_bound);
+
+ // If necessary, constrain destructors in the unadjusted form of this
+ // expression.
+ let cmt_result = {
+ let mc = mc::MemCategorizationContext::new(self);
+ mc.cat_expr_unadjusted(expr)
+ };
+ match cmt_result {
+ Ok(head_cmt) => {
+ self.check_safety_of_rvalue_destructor_if_necessary(head_cmt,
+ expr.span);
+ }
+ Err(..) => {
+ self.tcx.sess.delay_span_bug(expr.span, "cat_expr_unadjusted Errd");
+ }
}
- */
- _ => {}
}
- // If necessary, constrain destructors in the unadjusted form of this
- // expression.
+ // If necessary, constrain destructors in this expression. This will be
+ // the adjusted form if there is an adjustment.
let cmt_result = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- mc.cat_expr_unadjusted(expr)
+ let mc = mc::MemCategorizationContext::new(self);
+ mc.cat_expr(expr)
};
match cmt_result {
Ok(head_cmt) => {
- check_safety_of_rvalue_destructor_if_necessary(rcx,
- head_cmt,
- expr.span);
+ self.check_safety_of_rvalue_destructor_if_necessary(head_cmt, expr.span);
}
Err(..) => {
- let tcx = rcx.fcx.tcx();
- tcx.sess.delay_span_bug(expr.span, "cat_expr_unadjusted Errd");
+ self.tcx.sess.delay_span_bug(expr.span, "cat_expr Errd");
}
}
- }
- // If necessary, constrain destructors in this expression. This will be
- // the adjusted form if there is an adjustment.
- let cmt_result = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- mc.cat_expr(expr)
- };
- match cmt_result {
- Ok(head_cmt) => {
- check_safety_of_rvalue_destructor_if_necessary(rcx, head_cmt, expr.span);
- }
- Err(..) => {
- let tcx = rcx.fcx.tcx();
- tcx.sess.delay_span_bug(expr.span, "cat_expr Errd");
- }
- }
-
- debug!("regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs",
- expr, rcx.repeating_scope);
- match expr.node {
- hir::ExprPath(..) => {
- rcx.fcx.opt_node_ty_substs(expr.id, |item_substs| {
- let origin = infer::ParameterOrigin::Path;
- substs_wf_in_scope(rcx, origin, &item_substs.substs, expr.span, expr_region);
- });
- }
-
- hir::ExprCall(ref callee, ref args) => {
- if has_method_map {
- constrain_call(rcx, expr, Some(&callee),
- args.iter().map(|e| &**e), false);
- } else {
- constrain_callee(rcx, callee.id, expr, &callee);
- constrain_call(rcx, expr, None,
- args.iter().map(|e| &**e), false);
+ debug!("regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs",
+ expr, self.repeating_scope);
+ match expr.node {
+ hir::ExprPath(..) => {
+ self.fcx.opt_node_ty_substs(expr.id, |item_substs| {
+ let origin = infer::ParameterOrigin::Path;
+ self.substs_wf_in_scope(origin, &item_substs.substs, expr.span, expr_region);
+ });
}
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprCall(ref callee, ref args) => {
+ if has_method_map {
+ self.constrain_call(expr, Some(&callee),
+ args.iter().map(|e| &**e), false);
+ } else {
+ self.constrain_callee(callee.id, expr, &callee);
+ self.constrain_call(expr, None,
+ args.iter().map(|e| &**e), false);
+ }
- hir::ExprMethodCall(_, _, ref args) => {
- constrain_call(rcx, expr, Some(&args[0]),
- args[1..].iter().map(|e| &**e), false);
+ intravisit::walk_expr(self, expr);
+ }
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprMethodCall(_, _, ref args) => {
+ self.constrain_call(expr, Some(&args[0]),
+ args[1..].iter().map(|e| &**e), false);
- hir::ExprAssignOp(_, ref lhs, ref rhs) => {
- if has_method_map {
- constrain_call(rcx, expr, Some(&lhs),
- Some(&**rhs).into_iter(), false);
+ intravisit::walk_expr(self, expr);
}
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprAssignOp(_, ref lhs, ref rhs) => {
+ if has_method_map {
+ self.constrain_call(expr, Some(&lhs),
+ Some(&**rhs).into_iter(), false);
+ }
- hir::ExprIndex(ref lhs, ref rhs) if has_method_map => {
- constrain_call(rcx, expr, Some(&lhs),
- Some(&**rhs).into_iter(), true);
+ intravisit::walk_expr(self, expr);
+ }
- intravisit::walk_expr(rcx, expr);
- },
+ hir::ExprIndex(ref lhs, ref rhs) if has_method_map => {
+ self.constrain_call(expr, Some(&lhs),
+ Some(&**rhs).into_iter(), true);
- hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => {
- let implicitly_ref_args = !op.node.is_by_value();
+ intravisit::walk_expr(self, expr);
+ },
- // As `expr_method_call`, but the call is via an
- // overloaded op. Note that we (sadly) currently use an
- // implicit "by ref" sort of passing style here. This
- // should be converted to an adjustment!
- constrain_call(rcx, expr, Some(&lhs),
- Some(&**rhs).into_iter(), implicitly_ref_args);
+ hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => {
+ let implicitly_ref_args = !op.node.is_by_value();
- intravisit::walk_expr(rcx, expr);
- }
+ // As `expr_method_call`, but the call is via an
+ // overloaded op. Note that we (sadly) currently use an
+ // implicit "by ref" sort of passing style here. This
+ // should be converted to an adjustment!
+ self.constrain_call(expr, Some(&lhs),
+ Some(&**rhs).into_iter(), implicitly_ref_args);
- hir::ExprBinary(_, ref lhs, ref rhs) => {
- // If you do `x OP y`, then the types of `x` and `y` must
- // outlive the operation you are performing.
- let lhs_ty = rcx.resolve_expr_type_adjusted(&lhs);
- let rhs_ty = rcx.resolve_expr_type_adjusted(&rhs);
- for &ty in &[lhs_ty, rhs_ty] {
- type_must_outlive(rcx,
- infer::Operand(expr.span),
- ty,
- expr_region);
+ intravisit::walk_expr(self, expr);
}
- intravisit::walk_expr(rcx, expr);
- }
- hir::ExprUnary(op, ref lhs) if has_method_map => {
- let implicitly_ref_args = !op.is_by_value();
+ hir::ExprBinary(_, ref lhs, ref rhs) => {
+ // If you do `x OP y`, then the types of `x` and `y` must
+ // outlive the operation you are performing.
+ let lhs_ty = self.resolve_expr_type_adjusted(&lhs);
+ let rhs_ty = self.resolve_expr_type_adjusted(&rhs);
+ for &ty in &[lhs_ty, rhs_ty] {
+ self.type_must_outlive(infer::Operand(expr.span),
+ ty, expr_region);
+ }
+ intravisit::walk_expr(self, expr);
+ }
- // As above.
- constrain_call(rcx, expr, Some(&lhs),
- None::<hir::Expr>.iter(), implicitly_ref_args);
+ hir::ExprUnary(op, ref lhs) if has_method_map => {
+ let implicitly_ref_args = !op.is_by_value();
- intravisit::walk_expr(rcx, expr);
- }
+ // As above.
+ self.constrain_call(expr, Some(&lhs),
+ None::<hir::Expr>.iter(), implicitly_ref_args);
- hir::ExprUnary(hir::UnDeref, ref base) => {
- // For *a, the lifetime of a must enclose the deref
- let method_call = MethodCall::expr(expr.id);
- let base_ty = match rcx.fcx.inh.tables.borrow().method_map.get(&method_call) {
- Some(method) => {
- constrain_call(rcx, expr, Some(&base),
- None::<hir::Expr>.iter(), true);
- let fn_ret = // late-bound regions in overloaded method calls are instantiated
- rcx.tcx().no_late_bound_regions(&method.ty.fn_ret()).unwrap();
- fn_ret.unwrap()
- }
- None => rcx.resolve_node_type(base.id)
- };
- if let ty::TyRef(r_ptr, _) = base_ty.sty {
- mk_subregion_due_to_dereference(
- rcx, expr.span, expr_region, *r_ptr);
+ intravisit::walk_expr(self, expr);
}
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprUnary(hir::UnDeref, ref base) => {
+ // For *a, the lifetime of a must enclose the deref
+ let method_call = MethodCall::expr(expr.id);
+ let base_ty = match self.tables.borrow().method_map.get(&method_call) {
+ Some(method) => {
+ self.constrain_call(expr, Some(&base),
+ None::<hir::Expr>.iter(), true);
+ // late-bound regions in overloaded method calls are instantiated
+ let fn_ret = self.tcx.no_late_bound_regions(&method.ty.fn_ret());
+ fn_ret.unwrap().unwrap()
+ }
+ None => self.resolve_node_type(base.id)
+ };
+ if let ty::TyRef(r_ptr, _) = base_ty.sty {
+ self.mk_subregion_due_to_dereference(expr.span, expr_region, *r_ptr);
+ }
- hir::ExprIndex(ref vec_expr, _) => {
- // For a[b], the lifetime of a must enclose the deref
- let vec_type = rcx.resolve_expr_type_adjusted(&vec_expr);
- constrain_index(rcx, expr, vec_type);
+ intravisit::walk_expr(self, expr);
+ }
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprIndex(ref vec_expr, _) => {
+ // For a[b], the lifetime of a must enclose the deref
+ let vec_type = self.resolve_expr_type_adjusted(&vec_expr);
+ self.constrain_index(expr, vec_type);
- hir::ExprCast(ref source, _) => {
- // Determine if we are casting `source` to a trait
- // instance. If so, we have to be sure that the type of
- // the source obeys the trait's region bound.
- constrain_cast(rcx, expr, &source);
- intravisit::walk_expr(rcx, expr);
- }
+ intravisit::walk_expr(self, expr);
+ }
- hir::ExprAddrOf(m, ref base) => {
- link_addr_of(rcx, expr, m, &base);
+ hir::ExprCast(ref source, _) => {
+ // Determine if we are casting `source` to a trait
+ // instance. If so, we have to be sure that the type of
+ // the source obeys the trait's region bound.
+ self.constrain_cast(expr, &source);
+ intravisit::walk_expr(self, expr);
+ }
- // Require that when you write a `&expr` expression, the
- // resulting pointer has a lifetime that encompasses the
- // `&expr` expression itself. Note that we constraining
- // the type of the node expr.id here *before applying
- // adjustments*.
- //
- // FIXME(#6268) nested method calls requires that this rule change
- let ty0 = rcx.resolve_node_type(expr.id);
- type_must_outlive(rcx, infer::AddrOf(expr.span), ty0, expr_region);
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprAddrOf(m, ref base) => {
+ self.link_addr_of(expr, m, &base);
+
+ // Require that when you write a `&expr` expression, the
+ // resulting pointer has a lifetime that encompasses the
+            // `&expr` expression itself. Note that we are constraining
+ // the type of the node expr.id here *before applying
+ // adjustments*.
+ //
+            // FIXME(#6268) nested method calls require that this rule change
+ let ty0 = self.resolve_node_type(expr.id);
+ self.type_must_outlive(infer::AddrOf(expr.span), ty0, expr_region);
+ intravisit::walk_expr(self, expr);
+ }
- hir::ExprMatch(ref discr, ref arms, _) => {
- link_match(rcx, &discr, &arms[..]);
+ hir::ExprMatch(ref discr, ref arms, _) => {
+ self.link_match(&discr, &arms[..]);
- intravisit::walk_expr(rcx, expr);
- }
+ intravisit::walk_expr(self, expr);
+ }
- hir::ExprClosure(_, _, ref body, _) => {
- check_expr_fn_block(rcx, expr, &body);
- }
+ hir::ExprClosure(_, _, ref body, _) => {
+ self.check_expr_fn_block(expr, &body);
+ }
- hir::ExprLoop(ref body, _) => {
- let repeating_scope = rcx.set_repeating_scope(body.id);
- intravisit::walk_expr(rcx, expr);
- rcx.set_repeating_scope(repeating_scope);
- }
+ hir::ExprLoop(ref body, _) => {
+ let repeating_scope = self.set_repeating_scope(body.id);
+ intravisit::walk_expr(self, expr);
+ self.set_repeating_scope(repeating_scope);
+ }
- hir::ExprWhile(ref cond, ref body, _) => {
- let repeating_scope = rcx.set_repeating_scope(cond.id);
- rcx.visit_expr(&cond);
+ hir::ExprWhile(ref cond, ref body, _) => {
+ let repeating_scope = self.set_repeating_scope(cond.id);
+ self.visit_expr(&cond);
- rcx.set_repeating_scope(body.id);
- rcx.visit_block(&body);
+ self.set_repeating_scope(body.id);
+ self.visit_block(&body);
- rcx.set_repeating_scope(repeating_scope);
- }
+ self.set_repeating_scope(repeating_scope);
+ }
- hir::ExprRet(Some(ref ret_expr)) => {
- let call_site_scope = rcx.call_site_scope;
- debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}",
- ret_expr.id, call_site_scope);
- type_of_node_must_outlive(rcx,
- infer::CallReturn(ret_expr.span),
- ret_expr.id,
- ty::ReScope(call_site_scope.unwrap()));
- intravisit::walk_expr(rcx, expr);
- }
+ hir::ExprRet(Some(ref ret_expr)) => {
+ let call_site_scope = self.call_site_scope;
+ debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}",
+ ret_expr.id, call_site_scope);
+ self.type_of_node_must_outlive(infer::CallReturn(ret_expr.span),
+ ret_expr.id,
+ ty::ReScope(call_site_scope.unwrap()));
+ intravisit::walk_expr(self, expr);
+ }
- _ => {
- intravisit::walk_expr(rcx, expr);
+ _ => {
+ intravisit::walk_expr(self, expr);
+ }
}
}
}
-fn constrain_cast(rcx: &mut Rcx,
- cast_expr: &hir::Expr,
- source_expr: &hir::Expr)
-{
- debug!("constrain_cast(cast_expr={:?}, source_expr={:?})",
- cast_expr,
- source_expr);
+impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> {
+ fn constrain_cast(&mut self,
+ cast_expr: &hir::Expr,
+ source_expr: &hir::Expr)
+ {
+ debug!("constrain_cast(cast_expr={:?}, source_expr={:?})",
+ cast_expr,
+ source_expr);
- let source_ty = rcx.resolve_node_type(source_expr.id);
- let target_ty = rcx.resolve_node_type(cast_expr.id);
+ let source_ty = self.resolve_node_type(source_expr.id);
+ let target_ty = self.resolve_node_type(cast_expr.id);
- walk_cast(rcx, cast_expr, source_ty, target_ty);
+ self.walk_cast(cast_expr, source_ty, target_ty);
+ }
- fn walk_cast<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- cast_expr: &hir::Expr,
- from_ty: Ty<'tcx>,
- to_ty: Ty<'tcx>) {
+ fn walk_cast(&mut self,
+ cast_expr: &hir::Expr,
+ from_ty: Ty<'tcx>,
+ to_ty: Ty<'tcx>) {
debug!("walk_cast(from_ty={:?}, to_ty={:?})",
from_ty,
to_ty);
/*From:*/ (&ty::TyRef(from_r, ref from_mt),
/*To: */ &ty::TyRef(to_r, ref to_mt)) => {
// Target cannot outlive source, naturally.
- rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), *to_r, *from_r);
- walk_cast(rcx, cast_expr, from_mt.ty, to_mt.ty);
+ self.sub_regions(infer::Reborrow(cast_expr.span), *to_r, *from_r);
+ self.walk_cast(cast_expr, from_mt.ty, to_mt.ty);
}
/*From:*/ (_,
/*To: */ &ty::TyTrait(box ty::TraitTy { ref bounds, .. })) => {
// When T is existentially quantified as a trait
// `Foo+'to`, it must outlive the region bound `'to`.
- type_must_outlive(rcx, infer::RelateObjectBound(cast_expr.span),
- from_ty, bounds.region_bound);
+ self.type_must_outlive(infer::RelateObjectBound(cast_expr.span),
+ from_ty, bounds.region_bound);
}
/*From:*/ (&ty::TyBox(from_referent_ty),
/*To: */ &ty::TyBox(to_referent_ty)) => {
- walk_cast(rcx, cast_expr, from_referent_ty, to_referent_ty);
+ self.walk_cast(cast_expr, from_referent_ty, to_referent_ty);
}
_ => { }
}
}
-}
-fn check_expr_fn_block(rcx: &mut Rcx,
- expr: &hir::Expr,
- body: &hir::Block) {
- let repeating_scope = rcx.set_repeating_scope(body.id);
- intravisit::walk_expr(rcx, expr);
- rcx.set_repeating_scope(repeating_scope);
-}
+ fn check_expr_fn_block(&mut self,
+ expr: &hir::Expr,
+ body: &hir::Block) {
+ let repeating_scope = self.set_repeating_scope(body.id);
+ intravisit::walk_expr(self, expr);
+ self.set_repeating_scope(repeating_scope);
+ }
-fn constrain_callee(rcx: &mut Rcx,
- callee_id: ast::NodeId,
- _call_expr: &hir::Expr,
- _callee_expr: &hir::Expr) {
- let callee_ty = rcx.resolve_node_type(callee_id);
- match callee_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
- _ => {
- // this should not happen, but it does if the program is
- // erroneous
- //
- // bug!(
- // callee_expr.span,
- // "Calling non-function: {}",
- // callee_ty);
+ fn constrain_callee(&mut self,
+ callee_id: ast::NodeId,
+ _call_expr: &hir::Expr,
+ _callee_expr: &hir::Expr) {
+ let callee_ty = self.resolve_node_type(callee_id);
+ match callee_ty.sty {
+ ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
+ _ => {
+ // this should not happen, but it does if the program is
+ // erroneous
+ //
+ // bug!(
+ // callee_expr.span,
+ // "Calling non-function: {}",
+ // callee_ty);
+ }
}
}
-}
-fn constrain_call<'a, I: Iterator<Item=&'a hir::Expr>>(rcx: &mut Rcx,
- call_expr: &hir::Expr,
- receiver: Option<&hir::Expr>,
- arg_exprs: I,
- implicitly_ref_args: bool) {
- //! Invoked on every call site (i.e., normal calls, method calls,
- //! and overloaded operators). Constrains the regions which appear
- //! in the type of the function. Also constrains the regions that
- //! appear in the arguments appropriately.
-
- debug!("constrain_call(call_expr={:?}, \
- receiver={:?}, \
- implicitly_ref_args={})",
- call_expr,
- receiver,
- implicitly_ref_args);
-
- // `callee_region` is the scope representing the time in which the
- // call occurs.
- //
- // FIXME(#6268) to support nested method calls, should be callee_id
- let callee_scope = rcx.tcx().region_maps.node_extent(call_expr.id);
- let callee_region = ty::ReScope(callee_scope);
-
- debug!("callee_region={:?}", callee_region);
-
- for arg_expr in arg_exprs {
- debug!("Argument: {:?}", arg_expr);
-
- // ensure that any regions appearing in the argument type are
- // valid for at least the lifetime of the function:
- type_of_node_must_outlive(
- rcx, infer::CallArg(arg_expr.span),
- arg_expr.id, callee_region);
-
- // unfortunately, there are two means of taking implicit
- // references, and we need to propagate constraints as a
- // result. modes are going away and the "DerefArgs" code
- // should be ported to use adjustments
- if implicitly_ref_args {
- link_by_ref(rcx, arg_expr, callee_scope);
+ fn constrain_call<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self,
+ call_expr: &hir::Expr,
+ receiver: Option<&hir::Expr>,
+ arg_exprs: I,
+ implicitly_ref_args: bool) {
+ //! Invoked on every call site (i.e., normal calls, method calls,
+ //! and overloaded operators). Constrains the regions which appear
+ //! in the type of the function. Also constrains the regions that
+ //! appear in the arguments appropriately.
+
+ debug!("constrain_call(call_expr={:?}, \
+ receiver={:?}, \
+ implicitly_ref_args={})",
+ call_expr,
+ receiver,
+ implicitly_ref_args);
+
+ // `callee_region` is the scope representing the time in which the
+ // call occurs.
+ //
+ // FIXME(#6268) to support nested method calls, should be callee_id
+ let callee_scope = self.tcx.region_maps.node_extent(call_expr.id);
+ let callee_region = ty::ReScope(callee_scope);
+
+ debug!("callee_region={:?}", callee_region);
+
+ for arg_expr in arg_exprs {
+ debug!("Argument: {:?}", arg_expr);
+
+ // ensure that any regions appearing in the argument type are
+ // valid for at least the lifetime of the function:
+ self.type_of_node_must_outlive(infer::CallArg(arg_expr.span),
+ arg_expr.id, callee_region);
+
+ // unfortunately, there are two means of taking implicit
+ // references, and we need to propagate constraints as a
+ // result. modes are going away and the "DerefArgs" code
+ // should be ported to use adjustments
+ if implicitly_ref_args {
+ self.link_by_ref(arg_expr, callee_scope);
+ }
}
- }
- // as loop above, but for receiver
- if let Some(r) = receiver {
- debug!("receiver: {:?}", r);
- type_of_node_must_outlive(
- rcx, infer::CallRcvr(r.span),
- r.id, callee_region);
- if implicitly_ref_args {
- link_by_ref(rcx, &r, callee_scope);
+ // as loop above, but for receiver
+ if let Some(r) = receiver {
+ debug!("receiver: {:?}", r);
+ self.type_of_node_must_outlive(infer::CallRcvr(r.span),
+ r.id, callee_region);
+ if implicitly_ref_args {
+ self.link_by_ref(&r, callee_scope);
+ }
}
}
-}
-/// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being
-/// dereferenced, the lifetime of the pointer includes the deref expr.
-fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- deref_expr: &hir::Expr,
- derefs: usize,
- mut derefd_ty: Ty<'tcx>)
-{
- debug!("constrain_autoderefs(deref_expr={:?}, derefs={}, derefd_ty={:?})",
- deref_expr,
- derefs,
- derefd_ty);
-
- let s_deref_expr = rcx.tcx().region_maps.node_extent(deref_expr.id);
- let r_deref_expr = ty::ReScope(s_deref_expr);
- for i in 0..derefs {
- let method_call = MethodCall::autoderef(deref_expr.id, i as u32);
- debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs);
-
- let method = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).map(|m| m.clone());
-
- derefd_ty = match method {
- Some(method) => {
- debug!("constrain_autoderefs: #{} is overloaded, method={:?}",
- i, method);
-
- let origin = infer::ParameterOrigin::OverloadedDeref;
- substs_wf_in_scope(rcx, origin, method.substs, deref_expr.span, r_deref_expr);
-
- // Treat overloaded autoderefs as if an AutoRef adjustment
- // was applied on the base type, as that is always the case.
- let fn_sig = method.ty.fn_sig();
- let fn_sig = // late-bound regions should have been instantiated
- rcx.tcx().no_late_bound_regions(fn_sig).unwrap();
- let self_ty = fn_sig.inputs[0];
- let (m, r) = match self_ty.sty {
- ty::TyRef(r, ref m) => (m.mutbl, r),
- _ => {
- span_bug!(
- deref_expr.span,
- "bad overloaded deref type {:?}",
- method.ty)
- }
- };
+ /// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being
+ /// dereferenced, the lifetime of the pointer includes the deref expr.
+ fn constrain_autoderefs(&mut self,
+ deref_expr: &hir::Expr,
+ derefs: usize,
+ mut derefd_ty: Ty<'tcx>)
+ {
+ debug!("constrain_autoderefs(deref_expr={:?}, derefs={}, derefd_ty={:?})",
+ deref_expr,
+ derefs,
+ derefd_ty);
- debug!("constrain_autoderefs: receiver r={:?} m={:?}",
- r, m);
+ let s_deref_expr = self.tcx.region_maps.node_extent(deref_expr.id);
+ let r_deref_expr = ty::ReScope(s_deref_expr);
+ for i in 0..derefs {
+ let method_call = MethodCall::autoderef(deref_expr.id, i as u32);
+ debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs);
- {
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i));
- debug!("constrain_autoderefs: self_cmt={:?}",
- self_cmt);
- link_region(rcx, deref_expr.span, r,
- ty::BorrowKind::from_mutbl(m), self_cmt);
- }
+ let method = self.tables.borrow().method_map.get(&method_call).map(|m| m.clone());
+
+ derefd_ty = match method {
+ Some(method) => {
+ debug!("constrain_autoderefs: #{} is overloaded, method={:?}",
+ i, method);
+
+ let origin = infer::ParameterOrigin::OverloadedDeref;
+ self.substs_wf_in_scope(origin, method.substs, deref_expr.span, r_deref_expr);
+
+ // Treat overloaded autoderefs as if an AutoRef adjustment
+ // was applied on the base type, as that is always the case.
+ let fn_sig = method.ty.fn_sig();
+ let fn_sig = // late-bound regions should have been instantiated
+ self.tcx.no_late_bound_regions(fn_sig).unwrap();
+ let self_ty = fn_sig.inputs[0];
+ let (m, r) = match self_ty.sty {
+ ty::TyRef(r, ref m) => (m.mutbl, r),
+ _ => {
+ span_bug!(
+ deref_expr.span,
+ "bad overloaded deref type {:?}",
+ method.ty)
+ }
+ };
+
+ debug!("constrain_autoderefs: receiver r={:?} m={:?}",
+ r, m);
+
+ {
+ let mc = mc::MemCategorizationContext::new(self);
+ let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i));
+ debug!("constrain_autoderefs: self_cmt={:?}",
+ self_cmt);
+ self.link_region(deref_expr.span, r,
+ ty::BorrowKind::from_mutbl(m), self_cmt);
+ }
- // Specialized version of constrain_call.
- type_must_outlive(rcx, infer::CallRcvr(deref_expr.span),
- self_ty, r_deref_expr);
- match fn_sig.output {
- ty::FnConverging(return_type) => {
- type_must_outlive(rcx, infer::CallReturn(deref_expr.span),
- return_type, r_deref_expr);
- return_type
+ // Specialized version of constrain_call.
+ self.type_must_outlive(infer::CallRcvr(deref_expr.span),
+ self_ty, r_deref_expr);
+ match fn_sig.output {
+ ty::FnConverging(return_type) => {
+ self.type_must_outlive(infer::CallReturn(deref_expr.span),
+ return_type, r_deref_expr);
+ return_type
+ }
+ ty::FnDiverging => bug!()
}
- ty::FnDiverging => bug!()
}
- }
- None => derefd_ty
- };
+ None => derefd_ty
+ };
- if let ty::TyRef(r_ptr, _) = derefd_ty.sty {
- mk_subregion_due_to_dereference(rcx, deref_expr.span,
- r_deref_expr, *r_ptr);
- }
+ if let ty::TyRef(r_ptr, _) = derefd_ty.sty {
+ self.mk_subregion_due_to_dereference(deref_expr.span,
+ r_deref_expr, *r_ptr);
+ }
- match derefd_ty.builtin_deref(true, ty::NoPreference) {
- Some(mt) => derefd_ty = mt.ty,
- /* if this type can't be dereferenced, then there's already an error
- in the session saying so. Just bail out for now */
- None => break
+ match derefd_ty.builtin_deref(true, ty::NoPreference) {
+ Some(mt) => derefd_ty = mt.ty,
+ /* if this type can't be dereferenced, then there's already an error
+ in the session saying so. Just bail out for now */
+ None => break
+ }
}
}
-}
-pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx,
- deref_span: Span,
- minimum_lifetime: ty::Region,
- maximum_lifetime: ty::Region) {
- rcx.fcx.mk_subr(infer::DerefPointer(deref_span),
- minimum_lifetime, maximum_lifetime)
-}
+ pub fn mk_subregion_due_to_dereference(&mut self,
+ deref_span: Span,
+ minimum_lifetime: ty::Region,
+ maximum_lifetime: ty::Region) {
+ self.sub_regions(infer::DerefPointer(deref_span),
+ minimum_lifetime, maximum_lifetime)
+ }
-fn check_safety_of_rvalue_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- cmt: mc::cmt<'tcx>,
- span: Span) {
- match cmt.cat {
- Categorization::Rvalue(region) => {
- match region {
- ty::ReScope(rvalue_scope) => {
- let typ = rcx.resolve_type(cmt.ty);
- dropck::check_safety_of_destructor_if_necessary(rcx,
- typ,
- span,
- rvalue_scope);
- }
- ty::ReStatic => {}
- region => {
- span_bug!(span,
- "unexpected rvalue region in rvalue \
- destructor safety checking: `{:?}`",
- region);
+ fn check_safety_of_rvalue_destructor_if_necessary(&mut self,
+ cmt: mc::cmt<'tcx>,
+ span: Span) {
+ match cmt.cat {
+ Categorization::Rvalue(region) => {
+ match region {
+ ty::ReScope(rvalue_scope) => {
+ let typ = self.resolve_type(cmt.ty);
+ dropck::check_safety_of_destructor_if_necessary(self,
+ typ,
+ span,
+ rvalue_scope);
+ }
+ ty::ReStatic => {}
+ region => {
+ span_bug!(span,
+ "unexpected rvalue region in rvalue \
+ destructor safety checking: `{:?}`",
+ region);
+ }
}
}
+ _ => {}
}
- _ => {}
}
-}
-/// Invoked on any index expression that occurs. Checks that if this is a slice being indexed, the
-/// lifetime of the pointer includes the deref expr.
-fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
- index_expr: &hir::Expr,
- indexed_ty: Ty<'tcx>)
-{
- debug!("constrain_index(index_expr=?, indexed_ty={}",
- rcx.fcx.infcx().ty_to_string(indexed_ty));
-
- let r_index_expr = ty::ReScope(rcx.tcx().region_maps.node_extent(index_expr.id));
- if let ty::TyRef(r_ptr, mt) = indexed_ty.sty {
- match mt.ty.sty {
- ty::TySlice(_) | ty::TyStr => {
- rcx.fcx.mk_subr(infer::IndexSlice(index_expr.span),
- r_index_expr, *r_ptr);
+ /// Invoked on any index expression that occurs. Checks that if this is a slice
+ /// being indexed, the lifetime of the pointer includes the deref expr.
+ fn constrain_index(&mut self,
+ index_expr: &hir::Expr,
+ indexed_ty: Ty<'tcx>)
+ {
+ debug!("constrain_index(index_expr=?, indexed_ty={}",
+ self.ty_to_string(indexed_ty));
+
+ let r_index_expr = ty::ReScope(self.tcx.region_maps.node_extent(index_expr.id));
+ if let ty::TyRef(r_ptr, mt) = indexed_ty.sty {
+ match mt.ty.sty {
+ ty::TySlice(_) | ty::TyStr => {
+ self.sub_regions(infer::IndexSlice(index_expr.span),
+ r_index_expr, *r_ptr);
+ }
+ _ => {}
}
- _ => {}
}
}
-}
-/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
-/// adjustments) are valid for at least `minimum_lifetime`
-fn type_of_node_must_outlive<'a, 'tcx>(
- rcx: &mut Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- id: ast::NodeId,
- minimum_lifetime: ty::Region)
-{
- let tcx = rcx.fcx.tcx();
-
- // Try to resolve the type. If we encounter an error, then typeck
- // is going to fail anyway, so just stop here and let typeck
- // report errors later on in the writeback phase.
- let ty0 = rcx.resolve_node_type(id);
- let ty = ty0.adjust(tcx, origin.span(), id,
- rcx.fcx.inh.tables.borrow().adjustments.get(&id),
- |method_call| rcx.resolve_method_type(method_call));
- debug!("constrain_regions_in_type_of_node(\
- ty={}, ty0={}, id={}, minimum_lifetime={:?})",
- ty, ty0,
- id, minimum_lifetime);
- type_must_outlive(rcx, origin, ty, minimum_lifetime);
-}
+ /// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
+ /// adjustments) are valid for at least `minimum_lifetime`
+ fn type_of_node_must_outlive(&mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ id: ast::NodeId,
+ minimum_lifetime: ty::Region)
+ {
+ let tcx = self.tcx;
+
+ // Try to resolve the type. If we encounter an error, then typeck
+ // is going to fail anyway, so just stop here and let typeck
+ // report errors later on in the writeback phase.
+ let ty0 = self.resolve_node_type(id);
+ let ty = ty0.adjust(tcx, origin.span(), id,
+ self.tables.borrow().adjustments.get(&id),
+ |method_call| self.resolve_method_type(method_call));
+ debug!("constrain_regions_in_type_of_node(\
+ ty={}, ty0={}, id={}, minimum_lifetime={:?})",
+ ty, ty0,
+ id, minimum_lifetime);
+ self.type_must_outlive(origin, ty, minimum_lifetime);
+ }
-/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
-/// resulting pointer is linked to the lifetime of its guarantor (if any).
-fn link_addr_of(rcx: &mut Rcx, expr: &hir::Expr,
- mutability: hir::Mutability, base: &hir::Expr) {
- debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
+ /// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
+ /// resulting pointer is linked to the lifetime of its guarantor (if any).
+ fn link_addr_of(&mut self, expr: &hir::Expr,
+ mutability: hir::Mutability, base: &hir::Expr) {
+ debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
- let cmt = {
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- ignore_err!(mc.cat_expr(base))
- };
+ let cmt = {
+ let mc = mc::MemCategorizationContext::new(self);
+ ignore_err!(mc.cat_expr(base))
+ };
- debug!("link_addr_of: cmt={:?}", cmt);
+ debug!("link_addr_of: cmt={:?}", cmt);
- link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt);
-}
+ self.link_region_from_node_type(expr.span, expr.id, mutability, cmt);
+ }
-/// Computes the guarantors for any ref bindings in a `let` and
-/// then ensures that the lifetime of the resulting pointer is
-/// linked to the lifetime of the initialization expression.
-fn link_local(rcx: &Rcx, local: &hir::Local) {
- debug!("regionck::for_local()");
- let init_expr = match local.init {
- None => { return; }
- Some(ref expr) => &**expr,
- };
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
- link_pattern(rcx, mc, discr_cmt, &local.pat);
-}
+ /// Computes the guarantors for any ref bindings in a `let` and
+ /// then ensures that the lifetime of the resulting pointer is
+ /// linked to the lifetime of the initialization expression.
+ fn link_local(&self, local: &hir::Local) {
+ debug!("regionck::for_local()");
+ let init_expr = match local.init {
+ None => { return; }
+ Some(ref expr) => &**expr,
+ };
+ let mc = mc::MemCategorizationContext::new(self);
+ let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
+ self.link_pattern(mc, discr_cmt, &local.pat);
+ }
-/// Computes the guarantors for any ref bindings in a match and
-/// then ensures that the lifetime of the resulting pointer is
-/// linked to the lifetime of its guarantor (if any).
-fn link_match(rcx: &Rcx, discr: &hir::Expr, arms: &[hir::Arm]) {
- debug!("regionck::for_match()");
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- let discr_cmt = ignore_err!(mc.cat_expr(discr));
- debug!("discr_cmt={:?}", discr_cmt);
- for arm in arms {
- for root_pat in &arm.pats {
- link_pattern(rcx, mc, discr_cmt.clone(), &root_pat);
+ /// Computes the guarantors for any ref bindings in a match and
+ /// then ensures that the lifetime of the resulting pointer is
+ /// linked to the lifetime of its guarantor (if any).
+ fn link_match(&self, discr: &hir::Expr, arms: &[hir::Arm]) {
+ debug!("regionck::for_match()");
+ let mc = mc::MemCategorizationContext::new(self);
+ let discr_cmt = ignore_err!(mc.cat_expr(discr));
+ debug!("discr_cmt={:?}", discr_cmt);
+ for arm in arms {
+ for root_pat in &arm.pats {
+ self.link_pattern(mc, discr_cmt.clone(), &root_pat);
+ }
}
}
-}
-/// Computes the guarantors for any ref bindings in a match and
-/// then ensures that the lifetime of the resulting pointer is
-/// linked to the lifetime of its guarantor (if any).
-fn link_fn_args(rcx: &Rcx, body_scope: CodeExtent, args: &[hir::Arg]) {
- debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- for arg in args {
- let arg_ty = rcx.fcx.node_ty(arg.id);
- let re_scope = ty::ReScope(body_scope);
- let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty);
- debug!("arg_ty={:?} arg_cmt={:?} arg={:?}",
- arg_ty,
- arg_cmt,
- arg);
- link_pattern(rcx, mc, arg_cmt, &arg.pat);
+ /// Computes the guarantors for any ref bindings in a match and
+ /// then ensures that the lifetime of the resulting pointer is
+ /// linked to the lifetime of its guarantor (if any).
+ fn link_fn_args(&self, body_scope: CodeExtent, args: &[hir::Arg]) {
+ debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
+ let mc = mc::MemCategorizationContext::new(self);
+ for arg in args {
+ let arg_ty = self.node_ty(arg.id);
+ let re_scope = ty::ReScope(body_scope);
+ let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty);
+ debug!("arg_ty={:?} arg_cmt={:?} arg={:?}",
+ arg_ty,
+ arg_cmt,
+ arg);
+ self.link_pattern(mc, arg_cmt, &arg.pat);
+ }
}
-}
-/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
-/// needed.
-fn link_pattern<'t, 'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- mc: mc::MemCategorizationContext<'t, 'a, 'tcx>,
- discr_cmt: mc::cmt<'tcx>,
- root_pat: &hir::Pat) {
- debug!("link_pattern(discr_cmt={:?}, root_pat={:?})",
- discr_cmt,
- root_pat);
- let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
- match sub_pat.node {
- // `ref x` pattern
- PatKind::Ident(hir::BindByRef(mutbl), _, _) => {
- link_region_from_node_type(
- rcx, sub_pat.span, sub_pat.id,
- mutbl, sub_cmt);
- }
+ /// Link lifetimes of any ref bindings in `root_pat` to the pointers found
+ /// in the discriminant, if needed.
+ fn link_pattern<'t>(&self,
+ mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>,
+ discr_cmt: mc::cmt<'tcx>,
+ root_pat: &hir::Pat) {
+ debug!("link_pattern(discr_cmt={:?}, root_pat={:?})",
+ discr_cmt,
+ root_pat);
+ let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
+ match sub_pat.node {
+ // `ref x` pattern
+ PatKind::Ident(hir::BindByRef(mutbl), _, _) => {
+ self.link_region_from_node_type(sub_pat.span, sub_pat.id,
+ mutbl, sub_cmt);
+ }
- // `[_, ..slice, _]` pattern
- PatKind::Vec(_, Some(ref slice_pat), _) => {
- match mc.cat_slice_pattern(sub_cmt, &slice_pat) {
- Ok((slice_cmt, slice_mutbl, slice_r)) => {
- link_region(rcx, sub_pat.span, &slice_r,
- ty::BorrowKind::from_mutbl(slice_mutbl),
- slice_cmt);
+ // `[_, ..slice, _]` pattern
+ PatKind::Vec(_, Some(ref slice_pat), _) => {
+ match mc.cat_slice_pattern(sub_cmt, &slice_pat) {
+ Ok((slice_cmt, slice_mutbl, slice_r)) => {
+ self.link_region(sub_pat.span, &slice_r,
+ ty::BorrowKind::from_mutbl(slice_mutbl),
+ slice_cmt);
+ }
+ Err(()) => {}
}
- Err(()) => {}
}
+ _ => {}
}
- _ => {}
- }
- });
-}
+ });
+ }
-/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
-/// autoref'd.
-fn link_autoref(rcx: &Rcx,
- expr: &hir::Expr,
- autoderefs: usize,
- autoref: &adjustment::AutoRef)
-{
- debug!("link_autoref(autoref={:?})", autoref);
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs));
- debug!("expr_cmt={:?}", expr_cmt);
-
- match *autoref {
- adjustment::AutoPtr(r, m) => {
- link_region(rcx, expr.span, r,
- ty::BorrowKind::from_mutbl(m), expr_cmt);
- }
+ /// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
+ /// autoref'd.
+ fn link_autoref(&self,
+ expr: &hir::Expr,
+ autoderefs: usize,
+ autoref: &adjustment::AutoRef)
+ {
+ debug!("link_autoref(autoref={:?})", autoref);
+ let mc = mc::MemCategorizationContext::new(self);
+ let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs));
+ debug!("expr_cmt={:?}", expr_cmt);
+
+ match *autoref {
+ adjustment::AutoPtr(r, m) => {
+ self.link_region(expr.span, r,
+ ty::BorrowKind::from_mutbl(m), expr_cmt);
+ }
- adjustment::AutoUnsafe(m) => {
- let r = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id));
- link_region(rcx, expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt);
+ adjustment::AutoUnsafe(m) => {
+ let r = ty::ReScope(self.tcx.region_maps.node_extent(expr.id));
+ self.link_region(expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt);
+ }
}
}
-}
-/// Computes the guarantor for cases where the `expr` is being passed by implicit reference and
-/// must outlive `callee_scope`.
-fn link_by_ref(rcx: &Rcx,
- expr: &hir::Expr,
- callee_scope: CodeExtent) {
- debug!("link_by_ref(expr={:?}, callee_scope={:?})",
- expr, callee_scope);
- let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
- let expr_cmt = ignore_err!(mc.cat_expr(expr));
- let borrow_region = ty::ReScope(callee_scope);
- link_region(rcx, expr.span, &borrow_region, ty::ImmBorrow, expr_cmt);
-}
+ /// Computes the guarantor for cases where the `expr` is being passed by implicit reference and
+ /// must outlive `callee_scope`.
+ fn link_by_ref(&self,
+ expr: &hir::Expr,
+ callee_scope: CodeExtent) {
+ debug!("link_by_ref(expr={:?}, callee_scope={:?})",
+ expr, callee_scope);
+ let mc = mc::MemCategorizationContext::new(self);
+ let expr_cmt = ignore_err!(mc.cat_expr(expr));
+ let borrow_region = ty::ReScope(callee_scope);
+ self.link_region(expr.span, &borrow_region, ty::ImmBorrow, expr_cmt);
+ }
-/// Like `link_region()`, except that the region is extracted from the type of `id`, which must be
-/// some reference (`&T`, `&str`, etc).
-fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- id: ast::NodeId,
- mutbl: hir::Mutability,
- cmt_borrowed: mc::cmt<'tcx>) {
- debug!("link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})",
- id, mutbl, cmt_borrowed);
-
- let rptr_ty = rcx.resolve_node_type(id);
- if let ty::TyRef(&r, _) = rptr_ty.sty {
- debug!("rptr_ty={}", rptr_ty);
- link_region(rcx, span, &r, ty::BorrowKind::from_mutbl(mutbl),
- cmt_borrowed);
+ /// Like `link_region()`, except that the region is extracted from the type of `id`,
+ /// which must be some reference (`&T`, `&str`, etc).
+ fn link_region_from_node_type(&self,
+ span: Span,
+ id: ast::NodeId,
+ mutbl: hir::Mutability,
+ cmt_borrowed: mc::cmt<'tcx>) {
+ debug!("link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})",
+ id, mutbl, cmt_borrowed);
+
+ let rptr_ty = self.resolve_node_type(id);
+ if let ty::TyRef(&r, _) = rptr_ty.sty {
+ debug!("rptr_ty={}", rptr_ty);
+ self.link_region(span, &r, ty::BorrowKind::from_mutbl(mutbl),
+ cmt_borrowed);
+ }
}
-}
-/// Informs the inference engine that `borrow_cmt` is being borrowed with kind `borrow_kind` and
-/// lifetime `borrow_region`. In order to ensure borrowck is satisfied, this may create constraints
-/// between regions, as explained in `link_reborrowed_region()`.
-fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- borrow_region: &ty::Region,
- borrow_kind: ty::BorrowKind,
- borrow_cmt: mc::cmt<'tcx>) {
- let mut borrow_cmt = borrow_cmt;
- let mut borrow_kind = borrow_kind;
+ /// Informs the inference engine that `borrow_cmt` is being borrowed with
+ /// kind `borrow_kind` and lifetime `borrow_region`.
+ /// In order to ensure borrowck is satisfied, this may create constraints
+ /// between regions, as explained in `link_reborrowed_region()`.
+ fn link_region(&self,
+ span: Span,
+ borrow_region: &ty::Region,
+ borrow_kind: ty::BorrowKind,
+ borrow_cmt: mc::cmt<'tcx>) {
+ let mut borrow_cmt = borrow_cmt;
+ let mut borrow_kind = borrow_kind;
+
+ let origin = infer::DataBorrowed(borrow_cmt.ty, span);
+ self.type_must_outlive(origin, borrow_cmt.ty, *borrow_region);
+
+ loop {
+ debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})",
+ borrow_region,
+ borrow_kind,
+ borrow_cmt);
+ match borrow_cmt.cat.clone() {
+ Categorization::Deref(ref_cmt, _,
+ mc::Implicit(ref_kind, ref_region)) |
+ Categorization::Deref(ref_cmt, _,
+ mc::BorrowedPtr(ref_kind, ref_region)) => {
+ match self.link_reborrowed_region(span,
+ borrow_region, borrow_kind,
+ ref_cmt, ref_region, ref_kind,
+ borrow_cmt.note) {
+ Some((c, k)) => {
+ borrow_cmt = c;
+ borrow_kind = k;
+ }
+ None => {
+ return;
+ }
+ }
+ }
- let origin = infer::DataBorrowed(borrow_cmt.ty, span);
- type_must_outlive(rcx, origin, borrow_cmt.ty, *borrow_region);
+ Categorization::Downcast(cmt_base, _) |
+ Categorization::Deref(cmt_base, _, mc::Unique) |
+ Categorization::Interior(cmt_base, _) => {
+ // Borrowing interior or owned data requires the base
+ // to be valid and borrowable in the same fashion.
+ borrow_cmt = cmt_base;
+ borrow_kind = borrow_kind;
+ }
- loop {
- debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})",
- borrow_region,
- borrow_kind,
- borrow_cmt);
- match borrow_cmt.cat.clone() {
- Categorization::Deref(ref_cmt, _,
- mc::Implicit(ref_kind, ref_region)) |
- Categorization::Deref(ref_cmt, _,
- mc::BorrowedPtr(ref_kind, ref_region)) => {
- match link_reborrowed_region(rcx, span,
- borrow_region, borrow_kind,
- ref_cmt, ref_region, ref_kind,
- borrow_cmt.note) {
- Some((c, k)) => {
- borrow_cmt = c;
- borrow_kind = k;
+ Categorization::Deref(_, _, mc::UnsafePtr(..)) |
+ Categorization::StaticItem |
+ Categorization::Upvar(..) |
+ Categorization::Local(..) |
+ Categorization::Rvalue(..) => {
+ // These are all "base cases" with independent lifetimes
+ // that are not subject to inference
+ return;
+ }
+ }
+ }
+ }
+
+ /// This is the most complicated case: the path being borrowed is
+ /// itself the referent of a borrowed pointer. Let me give an
+ /// example fragment of code to make clear(er) the situation:
+ ///
+ /// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
+ /// ...
+ /// &'z *r // the reborrow has lifetime 'z
+ ///
+ /// Now, in this case, our primary job is to add the inference
+ /// constraint that `'z <= 'a`. Given this setup, let's clarify the
+ /// parameters in (roughly) terms of the example:
+ ///
+ /// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
+ /// borrow_region ^~ ref_region ^~
+ /// borrow_kind ^~ ref_kind ^~
+ /// ref_cmt ^
+ ///
+ /// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
+ ///
+ /// Unfortunately, there are some complications beyond the simple
+ /// scenario I just painted:
+ ///
+ /// 1. The reference `r` might in fact be a "by-ref" upvar. In that
+ /// case, we have two jobs. First, we are inferring whether this reference
+ /// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
+ /// adjust that based on this borrow (e.g., if this is an `&mut` borrow,
+ /// then `r` must be an `&mut` reference). Second, whenever we link
+ /// two regions (here, `'z <= 'a`), we supply a *cause*, and in this
+ /// case we adjust the cause to indicate that the reference being
+ /// "reborrowed" is itself an upvar. This provides a nicer error message
+ /// should something go wrong.
+ ///
+ /// 2. There may in fact be more levels of reborrowing. In the
+ /// example, I said the borrow was like `&'z *r`, but it might
+ /// in fact be a borrow like `&'z **q` where `q` has type `&'a
+ /// &'b mut T`. In that case, we want to ensure that `'z <= 'a`
+ /// and `'z <= 'b`. This is explained more below.
+ ///
+ /// The return value of this function indicates whether we need to
+ /// recurse and process `ref_cmt` (see case 2 above).
+ fn link_reborrowed_region(&self,
+ span: Span,
+ borrow_region: &ty::Region,
+ borrow_kind: ty::BorrowKind,
+ ref_cmt: mc::cmt<'tcx>,
+ ref_region: ty::Region,
+ mut ref_kind: ty::BorrowKind,
+ note: mc::Note)
+ -> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
+ {
+ // Possible upvar ID we may need later to create an entry in the
+ // maybe link map.
+
+ // Detect by-ref upvar `x`:
+ let cause = match note {
+ mc::NoteUpvarRef(ref upvar_id) => {
+ let upvar_capture_map = &self.tables.borrow_mut().upvar_capture_map;
+ match upvar_capture_map.get(upvar_id) {
+ Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
+ // The mutability of the upvar may have been modified
+ // by the above adjustment, so update our local variable.
+ ref_kind = upvar_borrow.kind;
+
+ infer::ReborrowUpvar(span, *upvar_id)
}
- None => {
- return;
+ _ => {
+ span_bug!( span, "Illegal upvar id: {:?}", upvar_id);
}
}
}
+ mc::NoteClosureEnv(ref upvar_id) => {
+ // We don't have any mutability changes to propagate, but
+ // we do want to note that an upvar reborrow caused this
+ // link
+ infer::ReborrowUpvar(span, *upvar_id)
+ }
+ _ => {
+ infer::Reborrow(span)
+ }
+ };
- Categorization::Downcast(cmt_base, _) |
- Categorization::Deref(cmt_base, _, mc::Unique) |
- Categorization::Interior(cmt_base, _) => {
- // Borrowing interior or owned data requires the base
- // to be valid and borrowable in the same fashion.
- borrow_cmt = cmt_base;
- borrow_kind = borrow_kind;
+ debug!("link_reborrowed_region: {:?} <= {:?}",
+ borrow_region,
+ ref_region);
+ self.sub_regions(cause, *borrow_region, ref_region);
+
+ // If we end up needing to recurse and establish a region link
+ // with `ref_cmt`, calculate what borrow kind we will end up
+ // needing. This will be used below.
+ //
+ // One interesting twist is that we can weaken the borrow kind
+ // when we recurse: to reborrow an `&mut` referent as mutable,
+ // borrowck requires a unique path to the `&mut` reference but not
+ // necessarily a *mutable* path.
+ let new_borrow_kind = match borrow_kind {
+ ty::ImmBorrow =>
+ ty::ImmBorrow,
+ ty::MutBorrow | ty::UniqueImmBorrow =>
+ ty::UniqueImmBorrow
+ };
+
+ // Decide whether we need to recurse and link any regions within
+ // the `ref_cmt`. This is concerned for the case where the value
+ // being reborrowed is in fact a borrowed pointer found within
+ // another borrowed pointer. For example:
+ //
+ // let p: &'b &'a mut T = ...;
+ // ...
+ // &'z **p
+ //
+ // What makes this case particularly tricky is that, if the data
+ // being borrowed is a `&mut` or `&uniq` borrow, borrowck requires
+ // not only that `'z <= 'a`, (as before) but also `'z <= 'b`
+ // (otherwise the user might mutate through the `&mut T` reference
+ // after `'b` expires and invalidate the borrow we are looking at
+ // now).
+ //
+ // So let's re-examine our parameters in light of this more
+ // complicated (possible) scenario:
+ //
+ // A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T`
+ // borrow_region ^~ ref_region ^~
+ // borrow_kind ^~ ref_kind ^~
+ // ref_cmt ^~~
+ //
+ // (Note that since we have not examined `ref_cmt.cat`, we don't
+ // know whether this scenario has occurred; but I wanted to show
+ // how all the types get adjusted.)
+ match ref_kind {
+ ty::ImmBorrow => {
+ // The reference being reborrowed is a sharable ref of
+ // type `&'a T`. In this case, it doesn't matter where we
+ // *found* the `&T` pointer, the memory it references will
+ // be valid and immutable for `'a`. So we can stop here.
+ //
+ // (Note that the `borrow_kind` must also be ImmBorrow or
+ // else the user is borrowed imm memory as mut memory,
+ // which means they'll get an error downstream in borrowck
+ // anyhow.)
+ return None;
}
- Categorization::Deref(_, _, mc::UnsafePtr(..)) |
- Categorization::StaticItem |
- Categorization::Upvar(..) |
- Categorization::Local(..) |
- Categorization::Rvalue(..) => {
- // These are all "base cases" with independent lifetimes
- // that are not subject to inference
- return;
+ ty::MutBorrow | ty::UniqueImmBorrow => {
+ // The reference being reborrowed is either an `&mut T` or
+ // `&uniq T`. This is the case where recursion is needed.
+ return Some((ref_cmt, new_borrow_kind));
}
}
}
-}
-/// This is the most complicated case: the path being borrowed is
-/// itself the referent of a borrowed pointer. Let me give an
-/// example fragment of code to make clear(er) the situation:
-///
-/// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
-/// ...
-/// &'z *r // the reborrow has lifetime 'z
-///
-/// Now, in this case, our primary job is to add the inference
-/// constraint that `'z <= 'a`. Given this setup, let's clarify the
-/// parameters in (roughly) terms of the example:
-///
-/// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
-/// borrow_region ^~ ref_region ^~
-/// borrow_kind ^~ ref_kind ^~
-/// ref_cmt ^
-///
-/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
-///
-/// Unfortunately, there are some complications beyond the simple
-/// scenario I just painted:
-///
-/// 1. The reference `r` might in fact be a "by-ref" upvar. In that
-/// case, we have two jobs. First, we are inferring whether this reference
-/// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
-/// adjust that based on this borrow (e.g., if this is an `&mut` borrow,
-/// then `r` must be an `&mut` reference). Second, whenever we link
-/// two regions (here, `'z <= 'a`), we supply a *cause*, and in this
-/// case we adjust the cause to indicate that the reference being
-/// "reborrowed" is itself an upvar. This provides a nicer error message
-/// should something go wrong.
-///
-/// 2. There may in fact be more levels of reborrowing. In the
-/// example, I said the borrow was like `&'z *r`, but it might
-/// in fact be a borrow like `&'z **q` where `q` has type `&'a
-/// &'b mut T`. In that case, we want to ensure that `'z <= 'a`
-/// and `'z <= 'b`. This is explained more below.
-///
-/// The return value of this function indicates whether we need to
-/// recurse and process `ref_cmt` (see case 2 above).
-fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- borrow_region: &ty::Region,
- borrow_kind: ty::BorrowKind,
- ref_cmt: mc::cmt<'tcx>,
- ref_region: ty::Region,
- mut ref_kind: ty::BorrowKind,
- note: mc::Note)
- -> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
-{
- // Possible upvar ID we may need later to create an entry in the
- // maybe link map.
-
- // Detect by-ref upvar `x`:
- let cause = match note {
- mc::NoteUpvarRef(ref upvar_id) => {
- let upvar_capture_map = &rcx.fcx.inh.tables.borrow_mut().upvar_capture_map;
- match upvar_capture_map.get(upvar_id) {
- Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
- // The mutability of the upvar may have been modified
- // by the above adjustment, so update our local variable.
- ref_kind = upvar_borrow.kind;
-
- infer::ReborrowUpvar(span, *upvar_id)
- }
- _ => {
- span_bug!( span, "Illegal upvar id: {:?}", upvar_id);
- }
- }
- }
- mc::NoteClosureEnv(ref upvar_id) => {
- // We don't have any mutability changes to propagate, but
- // we do want to note that an upvar reborrow caused this
- // link
- infer::ReborrowUpvar(span, *upvar_id)
- }
- _ => {
- infer::Reborrow(span)
- }
- };
-
- debug!("link_reborrowed_region: {:?} <= {:?}",
- borrow_region,
- ref_region);
- rcx.fcx.mk_subr(cause, *borrow_region, ref_region);
-
- // If we end up needing to recurse and establish a region link
- // with `ref_cmt`, calculate what borrow kind we will end up
- // needing. This will be used below.
- //
- // One interesting twist is that we can weaken the borrow kind
- // when we recurse: to reborrow an `&mut` referent as mutable,
- // borrowck requires a unique path to the `&mut` reference but not
- // necessarily a *mutable* path.
- let new_borrow_kind = match borrow_kind {
- ty::ImmBorrow =>
- ty::ImmBorrow,
- ty::MutBorrow | ty::UniqueImmBorrow =>
- ty::UniqueImmBorrow
- };
-
- // Decide whether we need to recurse and link any regions within
- // the `ref_cmt`. This is concerned for the case where the value
- // being reborrowed is in fact a borrowed pointer found within
- // another borrowed pointer. For example:
- //
- // let p: &'b &'a mut T = ...;
- // ...
- // &'z **p
- //
- // What makes this case particularly tricky is that, if the data
- // being borrowed is a `&mut` or `&uniq` borrow, borrowck requires
- // not only that `'z <= 'a`, (as before) but also `'z <= 'b`
- // (otherwise the user might mutate through the `&mut T` reference
- // after `'b` expires and invalidate the borrow we are looking at
- // now).
- //
- // So let's re-examine our parameters in light of this more
- // complicated (possible) scenario:
- //
- // A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T`
- // borrow_region ^~ ref_region ^~
- // borrow_kind ^~ ref_kind ^~
- // ref_cmt ^~~
- //
- // (Note that since we have not examined `ref_cmt.cat`, we don't
- // know whether this scenario has occurred; but I wanted to show
- // how all the types get adjusted.)
- match ref_kind {
- ty::ImmBorrow => {
- // The reference being reborrowed is a sharable ref of
- // type `&'a T`. In this case, it doesn't matter where we
- // *found* the `&T` pointer, the memory it references will
- // be valid and immutable for `'a`. So we can stop here.
- //
- // (Note that the `borrow_kind` must also be ImmBorrow or
- // else the user is borrowed imm memory as mut memory,
- // which means they'll get an error downstream in borrowck
- // anyhow.)
- return None;
+ /// Checks that the values provided for type/region arguments in a given
+ /// expression are well-formed and in-scope.
+ fn substs_wf_in_scope(&mut self,
+ origin: infer::ParameterOrigin,
+ substs: &Substs<'tcx>,
+ expr_span: Span,
+ expr_region: ty::Region) {
+ debug!("substs_wf_in_scope(substs={:?}, \
+ expr_region={:?}, \
+ origin={:?}, \
+ expr_span={:?})",
+ substs, expr_region, origin, expr_span);
+
+ let origin = infer::ParameterInScope(origin, expr_span);
+
+ for ®ion in &substs.regions {
+ self.sub_regions(origin.clone(), expr_region, region);
}
- ty::MutBorrow | ty::UniqueImmBorrow => {
- // The reference being reborrowed is either an `&mut T` or
- // `&uniq T`. This is the case where recursion is needed.
- return Some((ref_cmt, new_borrow_kind));
+ for &ty in &substs.types {
+ let ty = self.resolve_type(ty);
+ self.type_must_outlive(origin.clone(), ty, expr_region);
}
}
-}
-/// Checks that the values provided for type/region arguments in a given
-/// expression are well-formed and in-scope.
-pub fn substs_wf_in_scope<'a,'tcx>(rcx: &mut Rcx<'a,'tcx>,
- origin: infer::ParameterOrigin,
- substs: &Substs<'tcx>,
- expr_span: Span,
- expr_region: ty::Region) {
- debug!("substs_wf_in_scope(substs={:?}, \
- expr_region={:?}, \
- origin={:?}, \
- expr_span={:?})",
- substs, expr_region, origin, expr_span);
-
- let origin = infer::ParameterInScope(origin, expr_span);
-
- for ®ion in &substs.regions {
- rcx.fcx.mk_subr(origin.clone(), expr_region, region);
- }
+ /// Ensures that type is well-formed in `region`, which implies (among
+ /// other things) that all borrowed data reachable via `ty` outlives
+ /// `region`.
+ pub fn type_must_outlive(&self,
+ origin: infer::SubregionOrigin<'tcx>,
+ ty: Ty<'tcx>,
+ region: ty::Region)
+ {
+ let ty = self.resolve_type(ty);
- for &ty in &substs.types {
- let ty = rcx.resolve_type(ty);
- type_must_outlive(rcx, origin.clone(), ty, expr_region);
- }
-}
+ debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})",
+ ty,
+ region,
+ origin);
-/// Ensures that type is well-formed in `region`, which implies (among
-/// other things) that all borrowed data reachable via `ty` outlives
-/// `region`.
-pub fn type_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- ty: Ty<'tcx>,
- region: ty::Region)
-{
- let ty = rcx.resolve_type(ty);
-
- debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})",
- ty,
- region,
- origin);
-
- assert!(!ty.has_escaping_regions());
-
- let components = ty::outlives::components(rcx.infcx(), ty);
- components_must_outlive(rcx, origin, components, region);
-}
+ assert!(!ty.has_escaping_regions());
-fn components_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- components: Vec<ty::outlives::Component<'tcx>>,
- region: ty::Region)
-{
- for component in components {
- let origin = origin.clone();
- match component {
- ty::outlives::Component::Region(region1) => {
- rcx.fcx.mk_subr(origin, region, region1);
- }
- ty::outlives::Component::Param(param_ty) => {
- param_ty_must_outlive(rcx, origin, region, param_ty);
- }
- ty::outlives::Component::Projection(projection_ty) => {
- projection_must_outlive(rcx, origin, region, projection_ty);
- }
- ty::outlives::Component::EscapingProjection(subcomponents) => {
- components_must_outlive(rcx, origin, subcomponents, region);
- }
- ty::outlives::Component::UnresolvedInferenceVariable(v) => {
- // ignore this, we presume it will yield an error
- // later, since if a type variable is not resolved by
- // this point it never will be
- rcx.tcx().sess.delay_span_bug(
- origin.span(),
- &format!("unresolved inference variable in outlives: {:?}", v));
+ let components = self.outlives_components(ty);
+ self.components_must_outlive(origin, components, region);
+ }
+
+ fn components_must_outlive(&self,
+ origin: infer::SubregionOrigin<'tcx>,
+ components: Vec<ty::outlives::Component<'tcx>>,
+ region: ty::Region)
+ {
+ for component in components {
+ let origin = origin.clone();
+ match component {
+ ty::outlives::Component::Region(region1) => {
+ self.sub_regions(origin, region, region1);
+ }
+ ty::outlives::Component::Param(param_ty) => {
+ self.param_ty_must_outlive(origin, region, param_ty);
+ }
+ ty::outlives::Component::Projection(projection_ty) => {
+ self.projection_must_outlive(origin, region, projection_ty);
+ }
+ ty::outlives::Component::EscapingProjection(subcomponents) => {
+ self.components_must_outlive(origin, subcomponents, region);
+ }
+ ty::outlives::Component::UnresolvedInferenceVariable(v) => {
+ // ignore this, we presume it will yield an error
+ // later, since if a type variable is not resolved by
+ // this point it never will be
+ self.tcx.sess.delay_span_bug(
+ origin.span(),
+ &format!("unresolved inference variable in outlives: {:?}", v));
+ }
}
}
}
-}
-
-fn param_ty_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- region: ty::Region,
- param_ty: ty::ParamTy) {
- debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
- region, param_ty, origin);
- let verify_bound = param_bound(rcx, param_ty);
- let generic = GenericKind::Param(param_ty);
- rcx.fcx.infcx().verify_generic_bound(origin, generic, region, verify_bound);
-}
+ fn param_ty_must_outlive(&self,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region,
+ param_ty: ty::ParamTy) {
+ debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
+ region, param_ty, origin);
-fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- origin: infer::SubregionOrigin<'tcx>,
- region: ty::Region,
- projection_ty: ty::ProjectionTy<'tcx>)
-{
- debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
- region, projection_ty, origin);
-
- // This case is thorny for inference. The fundamental problem is
- // that there are many cases where we have choice, and inference
- // doesn't like choice (the current region inference in
- // particular). :) First off, we have to choose between using the
- // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
- // OutlivesProjectionComponent rules, any one of which is
- // sufficient. If there are no inference variables involved, it's
- // not hard to pick the right rule, but if there are, we're in a
- // bit of a catch 22: if we picked which rule we were going to
- // use, we could add constraints to the region inference graph
- // that make it apply, but if we don't add those constraints, the
- // rule might not apply (but another rule might). For now, we err
- // on the side of adding too few edges into the graph.
-
- // Compute the bounds we can derive from the environment or trait
- // definition. We know that the projection outlives all the
- // regions in this list.
- let env_bounds = projection_declared_bounds(rcx, origin.span(), projection_ty);
-
- debug!("projection_must_outlive: env_bounds={:?}",
- env_bounds);
-
- // If we know that the projection outlives 'static, then we're
- // done here.
- if env_bounds.contains(&ty::ReStatic) {
- debug!("projection_must_outlive: 'static as declared bound");
- return;
+ let verify_bound = self.param_bound(param_ty);
+ let generic = GenericKind::Param(param_ty);
+ self.verify_generic_bound(origin, generic, region, verify_bound);
}
- // If declared bounds list is empty, the only applicable rule is
- // OutlivesProjectionComponent. If there are inference variables,
- // then, we can break down the outlives into more primitive
- // components without adding unnecessary edges.
- //
- // If there are *no* inference variables, however, we COULD do
- // this, but we choose not to, because the error messages are less
- // good. For example, a requirement like `T::Item: 'r` would be
- // translated to a requirement that `T: 'r`; when this is reported
- // to the user, it will thus say "T: 'r must hold so that T::Item:
- // 'r holds". But that makes it sound like the only way to fix
- // the problem is to add `T: 'r`, which isn't true. So, if there are no
- // inference variables, we use a verify constraint instead of adding
- // edges, which winds up enforcing the same condition.
- let needs_infer = {
- projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) ||
- projection_ty.trait_ref.substs.regions.iter().any(|r| r.needs_infer())
- };
- if env_bounds.is_empty() && needs_infer {
- debug!("projection_must_outlive: no declared bounds");
-
- for &component_ty in &projection_ty.trait_ref.substs.types {
- type_must_outlive(rcx, origin.clone(), component_ty, region);
+ fn projection_must_outlive(&self,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region,
+ projection_ty: ty::ProjectionTy<'tcx>)
+ {
+ debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
+ region, projection_ty, origin);
+
+ // This case is thorny for inference. The fundamental problem is
+ // that there are many cases where we have choice, and inference
+ // doesn't like choice (the current region inference in
+ // particular). :) First off, we have to choose between using the
+ // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
+ // OutlivesProjectionComponent rules, any one of which is
+ // sufficient. If there are no inference variables involved, it's
+ // not hard to pick the right rule, but if there are, we're in a
+ // bit of a catch 22: if we picked which rule we were going to
+ // use, we could add constraints to the region inference graph
+ // that make it apply, but if we don't add those constraints, the
+ // rule might not apply (but another rule might). For now, we err
+ // on the side of adding too few edges into the graph.
+
+ // Compute the bounds we can derive from the environment or trait
+ // definition. We know that the projection outlives all the
+ // regions in this list.
+ let env_bounds = self.projection_declared_bounds(origin.span(), projection_ty);
+
+ debug!("projection_must_outlive: env_bounds={:?}",
+ env_bounds);
+
+ // If we know that the projection outlives 'static, then we're
+ // done here.
+ if env_bounds.contains(&ty::ReStatic) {
+ debug!("projection_must_outlive: 'static as declared bound");
+ return;
}
- for &r in &projection_ty.trait_ref.substs.regions {
- rcx.fcx.mk_subr(origin.clone(), region, r);
- }
+ // If the declared bounds list is empty, the only applicable rule is
+ // OutlivesProjectionComponent. If there are inference variables,
+ // then, we can break down the outlives into more primitive
+ // components without adding unnecessary edges.
+ //
+ // If there are *no* inference variables, however, we COULD do
+ // this, but we choose not to, because the error messages are less
+ // good. For example, a requirement like `T::Item: 'r` would be
+ // translated to a requirement that `T: 'r`; when this is reported
+ // to the user, it will thus say "T: 'r must hold so that T::Item:
+ // 'r holds". But that makes it sound like the only way to fix
+ // the problem is to add `T: 'r`, which isn't true. So, if there are no
+ // inference variables, we use a verify constraint instead of adding
+ // edges, which winds up enforcing the same condition.
+ let needs_infer = {
+ projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) ||
+ projection_ty.trait_ref.substs.regions.iter().any(|r| r.needs_infer())
+ };
+ if env_bounds.is_empty() && needs_infer {
+ debug!("projection_must_outlive: no declared bounds");
- return;
- }
+ for &component_ty in &projection_ty.trait_ref.substs.types {
+ self.type_must_outlive(origin.clone(), component_ty, region);
+ }
+
+ for &r in &projection_ty.trait_ref.substs.regions {
+ self.sub_regions(origin.clone(), region, r);
+ }
- // If we find that there is a unique declared bound `'b`, and this bound
- // appears in the trait reference, then the best action is to require that `'b:'r`,
- // so do that. This is best no matter what rule we use:
- //
- // - OutlivesProjectionEnv or OutlivesProjectionTraitDef: these would translate to
- // the requirement that `'b:'r`
- // - OutlivesProjectionComponent: this would require `'b:'r` in addition to other conditions
- if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) {
- let unique_bound = env_bounds[0];
- debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound);
- if projection_ty.trait_ref.substs.regions
- .iter()
- .any(|r| env_bounds.contains(r))
- {
- debug!("projection_must_outlive: unique declared bound appears in trait ref");
- rcx.fcx.mk_subr(origin.clone(), region, unique_bound);
return;
}
- }
- // Fallback to verifying after the fact that there exists a
- // declared bound, or that all the components appearing in the
- // projection outlive; in some cases, this may add insufficient
- // edges into the inference graph, leading to inference failures
- // even though a satisfactory solution exists.
- let verify_bound = projection_bound(rcx, origin.span(), env_bounds, projection_ty);
- let generic = GenericKind::Projection(projection_ty);
- rcx.fcx.infcx().verify_generic_bound(origin, generic.clone(), region, verify_bound);
-}
-
-fn type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, span: Span, ty: Ty<'tcx>) -> VerifyBound {
- match ty.sty {
- ty::TyParam(p) => {
- param_bound(rcx, p)
- }
- ty::TyProjection(data) => {
- let declared_bounds = projection_declared_bounds(rcx, span, data);
- projection_bound(rcx, span, declared_bounds, data)
+ // If we find that there is a unique declared bound `'b`, and this bound
+ // appears in the trait reference, then the best action is to require that `'b:'r`,
+ // so do that. This is best no matter what rule we use:
+ //
+ // - OutlivesProjectionEnv or OutlivesProjectionTraitDef: these would translate to
+ // the requirement that `'b:'r`
+ // - OutlivesProjectionComponent: this would require `'b:'r` in addition to
+ // other conditions
+ if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) {
+ let unique_bound = env_bounds[0];
+ debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound);
+ if projection_ty.trait_ref.substs.regions
+ .iter()
+ .any(|r| env_bounds.contains(r))
+ {
+ debug!("projection_must_outlive: unique declared bound appears in trait ref");
+ self.sub_regions(origin.clone(), region, unique_bound);
+ return;
+ }
}
- _ => {
- recursive_type_bound(rcx, span, ty)
+
+ // Fallback to verifying after the fact that there exists a
+ // declared bound, or that all the components appearing in the
+ // projection outlive; in some cases, this may add insufficient
+ // edges into the inference graph, leading to inference failures
+ // even though a satisfactory solution exists.
+ let verify_bound = self.projection_bound(origin.span(), env_bounds, projection_ty);
+ let generic = GenericKind::Projection(projection_ty);
+ self.verify_generic_bound(origin, generic.clone(), region, verify_bound);
+ }
+
+ fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound {
+ match ty.sty {
+ ty::TyParam(p) => {
+ self.param_bound(p)
+ }
+ ty::TyProjection(data) => {
+ let declared_bounds = self.projection_declared_bounds(span, data);
+ self.projection_bound(span, declared_bounds, data)
+ }
+ _ => {
+ self.recursive_type_bound(span, ty)
+ }
}
}
-}
-fn param_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, param_ty: ty::ParamTy) -> VerifyBound {
- let param_env = &rcx.infcx().parameter_environment;
+ fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound {
+ let param_env = &self.parameter_environment;
- debug!("param_bound(param_ty={:?})",
- param_ty);
+ debug!("param_bound(param_ty={:?})",
+ param_ty);
- let mut param_bounds = declared_generic_bounds_from_env(rcx, GenericKind::Param(param_ty));
+ let mut param_bounds = self.declared_generic_bounds_from_env(GenericKind::Param(param_ty));
- // Add in the default bound of fn body that applies to all in
- // scope type parameters:
- param_bounds.push(param_env.implicit_region_bound);
+ // Add in the default bound of fn body that applies to all in
+ // scope type parameters:
+ param_bounds.push(param_env.implicit_region_bound);
- VerifyBound::AnyRegion(param_bounds)
-}
+ VerifyBound::AnyRegion(param_bounds)
+ }
-fn projection_declared_bounds<'a, 'tcx>(rcx: &Rcx<'a,'tcx>,
- span: Span,
- projection_ty: ty::ProjectionTy<'tcx>)
- -> Vec<ty::Region>
-{
- // First assemble bounds from where clauses and traits.
+ fn projection_declared_bounds(&self,
+ span: Span,
+ projection_ty: ty::ProjectionTy<'tcx>)
+ -> Vec<ty::Region>
+ {
+ // First assemble bounds from where clauses and traits.
- let mut declared_bounds =
- declared_generic_bounds_from_env(rcx, GenericKind::Projection(projection_ty));
+ let mut declared_bounds =
+ self.declared_generic_bounds_from_env(GenericKind::Projection(projection_ty));
- declared_bounds.extend_from_slice(
- &declared_projection_bounds_from_trait(rcx, span, projection_ty));
+ declared_bounds.extend_from_slice(
+ &self.declared_projection_bounds_from_trait(span, projection_ty));
- declared_bounds
-}
+ declared_bounds
+ }
-fn projection_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- declared_bounds: Vec<ty::Region>,
- projection_ty: ty::ProjectionTy<'tcx>)
- -> VerifyBound {
- debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})",
- declared_bounds, projection_ty);
+ fn projection_bound(&self,
+ span: Span,
+ declared_bounds: Vec<ty::Region>,
+ projection_ty: ty::ProjectionTy<'tcx>)
+ -> VerifyBound {
+ debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})",
+ declared_bounds, projection_ty);
- // see the extensive comment in projection_must_outlive
+ // see the extensive comment in projection_must_outlive
- let ty = rcx.tcx().mk_projection(projection_ty.trait_ref, projection_ty.item_name);
- let recursive_bound = recursive_type_bound(rcx, span, ty);
+ let ty = self.tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name);
+ let recursive_bound = self.recursive_type_bound(span, ty);
- VerifyBound::AnyRegion(declared_bounds).or(recursive_bound)
-}
+ VerifyBound::AnyRegion(declared_bounds).or(recursive_bound)
+ }
-fn recursive_type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- ty: Ty<'tcx>)
- -> VerifyBound {
- let mut bounds = vec![];
+ fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound {
+ let mut bounds = vec![];
- for subty in ty.walk_shallow() {
- bounds.push(type_bound(rcx, span, subty));
- }
+ for subty in ty.walk_shallow() {
+ bounds.push(self.type_bound(span, subty));
+ }
- let mut regions = ty.regions();
- regions.retain(|r| !r.is_bound()); // ignore late-bound regions
- bounds.push(VerifyBound::AllRegions(regions));
+ let mut regions = ty.regions();
+ regions.retain(|r| !r.is_bound()); // ignore late-bound regions
+ bounds.push(VerifyBound::AllRegions(regions));
- // remove bounds that must hold, since they are not interesting
- bounds.retain(|b| !b.must_hold());
+ // remove bounds that must hold, since they are not interesting
+ bounds.retain(|b| !b.must_hold());
- if bounds.len() == 1 {
- bounds.pop().unwrap()
- } else {
- VerifyBound::AllBounds(bounds)
+ if bounds.len() == 1 {
+ bounds.pop().unwrap()
+ } else {
+ VerifyBound::AllBounds(bounds)
+ }
}
-}
-fn declared_generic_bounds_from_env<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- generic: GenericKind<'tcx>)
- -> Vec<ty::Region>
-{
- let param_env = &rcx.infcx().parameter_environment;
-
- // To start, collect bounds from user:
- let mut param_bounds = rcx.tcx().required_region_bounds(generic.to_ty(rcx.tcx()),
- param_env.caller_bounds.clone());
-
- // Next, collect regions we scraped from the well-formedness
- // constraints in the fn signature. To do that, we walk the list
- // of known relations from the fn ctxt.
- //
- // This is crucial because otherwise code like this fails:
- //
- // fn foo<'a, A>(x: &'a A) { x.bar() }
- //
- // The problem is that the type of `x` is `&'a A`. To be
- // well-formed, then, A must be lower-generic by `'a`, but we
- // don't know that this holds from first principles.
- for &(r, p) in &rcx.region_bound_pairs {
- debug!("generic={:?} p={:?}",
- generic,
- p);
- if generic == p {
- param_bounds.push(r);
+ fn declared_generic_bounds_from_env(&self, generic: GenericKind<'tcx>)
+ -> Vec<ty::Region>
+ {
+ let param_env = &self.parameter_environment;
+
+ // To start, collect bounds from user:
+ let mut param_bounds = self.tcx.required_region_bounds(generic.to_ty(self.tcx),
+ param_env.caller_bounds.clone());
+
+ // Next, collect regions we scraped from the well-formedness
+ // constraints in the fn signature. To do that, we walk the list
+ // of known relations from the fn ctxt.
+ //
+ // This is crucial because otherwise code like this fails:
+ //
+ // fn foo<'a, A>(x: &'a A) { x.bar() }
+ //
+ // The problem is that the type of `x` is `&'a A`. To be
+ // well-formed, then, A must be lower-generic by `'a`, but we
+ // don't know that this holds from first principles.
+ for &(r, p) in &self.region_bound_pairs {
+ debug!("generic={:?} p={:?}",
+ generic,
+ p);
+ if generic == p {
+ param_bounds.push(r);
+ }
}
- }
- param_bounds
-}
+ param_bounds
+ }
-fn declared_projection_bounds_from_trait<'a,'tcx>(rcx: &Rcx<'a, 'tcx>,
- span: Span,
- projection_ty: ty::ProjectionTy<'tcx>)
- -> Vec<ty::Region>
-{
- let fcx = rcx.fcx;
- let tcx = fcx.tcx();
- let infcx = fcx.infcx();
-
- debug!("projection_bounds(projection_ty={:?})",
- projection_ty);
-
- let ty = tcx.mk_projection(projection_ty.trait_ref.clone(), projection_ty.item_name);
-
- // Say we have a projection `<T as SomeTrait<'a>>::SomeType`. We are interested
- // in looking for a trait definition like:
- //
- // ```
- // trait SomeTrait<'a> {
- // type SomeType : 'a;
- // }
- // ```
- //
- // we can thus deduce that `<T as SomeTrait<'a>>::SomeType : 'a`.
- let trait_predicates = tcx.lookup_predicates(projection_ty.trait_ref.def_id);
- let predicates = trait_predicates.predicates.as_slice().to_vec();
- traits::elaborate_predicates(tcx, predicates)
- .filter_map(|predicate| {
- // we're only interesting in `T : 'a` style predicates:
- let outlives = match predicate {
- ty::Predicate::TypeOutlives(data) => data,
- _ => { return None; }
- };
+ fn declared_projection_bounds_from_trait(&self,
+ span: Span,
+ projection_ty: ty::ProjectionTy<'tcx>)
+ -> Vec<ty::Region>
+ {
+ debug!("projection_bounds(projection_ty={:?})",
+ projection_ty);
- debug!("projection_bounds: outlives={:?} (1)",
- outlives);
+ let ty = self.tcx.mk_projection(projection_ty.trait_ref.clone(),
+ projection_ty.item_name);
- // apply the substitutions (and normalize any projected types)
- let outlives = fcx.instantiate_type_scheme(span,
- projection_ty.trait_ref.substs,
- &outlives);
+ // Say we have a projection `<T as SomeTrait<'a>>::SomeType`. We are interested
+ // in looking for a trait definition like:
+ //
+ // ```
+ // trait SomeTrait<'a> {
+ // type SomeType : 'a;
+ // }
+ // ```
+ //
+ // we can thus deduce that `<T as SomeTrait<'a>>::SomeType : 'a`.
+ let trait_predicates = self.tcx.lookup_predicates(projection_ty.trait_ref.def_id);
+ let predicates = trait_predicates.predicates.as_slice().to_vec();
+ traits::elaborate_predicates(self.tcx, predicates)
+ .filter_map(|predicate| {
+ // we're only interested in `T : 'a` style predicates:
+ let outlives = match predicate {
+ ty::Predicate::TypeOutlives(data) => data,
+ _ => { return None; }
+ };
- debug!("projection_bounds: outlives={:?} (2)",
- outlives);
+ debug!("projection_bounds: outlives={:?} (1)",
+ outlives);
- let region_result = infcx.commit_if_ok(|_| {
- let (outlives, _) =
- infcx.replace_late_bound_regions_with_fresh_var(
- span,
- infer::AssocTypeProjection(projection_ty.item_name),
- &outlives);
+ // apply the substitutions (and normalize any projected types)
+ let outlives = self.instantiate_type_scheme(span,
+ projection_ty.trait_ref.substs,
+ &outlives);
- debug!("projection_bounds: outlives={:?} (3)",
+ debug!("projection_bounds: outlives={:?} (2)",
outlives);
- // check whether this predicate applies to our current projection
- match infer::mk_eqty(infcx, false, TypeOrigin::Misc(span), ty, outlives.0) {
- Ok(InferOk { obligations, .. }) => {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
- Ok(outlives.1)
+ let region_result = self.commit_if_ok(|_| {
+ let (outlives, _) =
+ self.replace_late_bound_regions_with_fresh_var(
+ span,
+ infer::AssocTypeProjection(projection_ty.item_name),
+ &outlives);
+
+ debug!("projection_bounds: outlives={:?} (3)",
+ outlives);
+
+ // check whether this predicate applies to our current projection
+ match self.eq_types(false, TypeOrigin::Misc(span), ty, outlives.0) {
+ Ok(InferOk { obligations, .. }) => {
+ // FIXME(#32730) propagate obligations
+ assert!(obligations.is_empty());
+ Ok(outlives.1)
+ }
+ Err(_) => { Err(()) }
}
- Err(_) => { Err(()) }
- }
- });
+ });
- debug!("projection_bounds: region_result={:?}",
- region_result);
+ debug!("projection_bounds: region_result={:?}",
+ region_result);
- region_result.ok()
- })
- .collect()
+ region_result.ok()
+ })
+ .collect()
+ }
}
use super::FnCtxt;
-use check::demand;
use middle::expr_use_visitor as euv;
use middle::mem_categorization as mc;
use middle::mem_categorization::Categorization;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::infer::{InferCtxt, UpvarRegion};
+use rustc::ty::{self, Ty};
+use rustc::infer::UpvarRegion;
use std::collections::HashSet;
use syntax::ast;
use syntax::codemap::Span;
///////////////////////////////////////////////////////////////////////////
// PUBLIC ENTRY POINTS
-pub fn closure_analyze_fn(fcx: &FnCtxt,
- _id: ast::NodeId,
- _decl: &hir::FnDecl,
- body: &hir::Block)
-{
- let mut seed = SeedBorrowKind::new(fcx);
- seed.visit_block(body);
- let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn closure_analyze_fn(&self, body: &hir::Block) {
+ let mut seed = SeedBorrowKind::new(self);
+ seed.visit_block(body);
+ let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
- let mut adjust = AdjustBorrowKind::new(fcx, &closures_with_inferred_kinds);
- adjust.visit_block(body);
+ let mut adjust = AdjustBorrowKind::new(self, &closures_with_inferred_kinds);
+ adjust.visit_block(body);
- // it's our job to process these.
- assert!(fcx.inh.deferred_call_resolutions.borrow().is_empty());
-}
+ // it's our job to process these.
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
+ }
-pub fn closure_analyze_const(fcx: &FnCtxt,
- body: &hir::Expr)
-{
- let mut seed = SeedBorrowKind::new(fcx);
- seed.visit_expr(body);
- let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
+ pub fn closure_analyze_const(&self, body: &hir::Expr) {
+ let mut seed = SeedBorrowKind::new(self);
+ seed.visit_expr(body);
+ let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
- let mut adjust = AdjustBorrowKind::new(fcx, &closures_with_inferred_kinds);
- adjust.visit_expr(body);
+ let mut adjust = AdjustBorrowKind::new(self, &closures_with_inferred_kinds);
+ adjust.visit_expr(body);
- // it's our job to process these.
- assert!(fcx.inh.deferred_call_resolutions.borrow().is_empty());
+ // it's our job to process these.
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
+ }
}
///////////////////////////////////////////////////////////////////////////
// SEED BORROW KIND
-struct SeedBorrowKind<'a,'tcx:'a> {
- fcx: &'a FnCtxt<'a,'tcx>,
+struct SeedBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
closures_with_inferred_kinds: HashSet<ast::NodeId>,
}
-impl<'a, 'tcx, 'v> Visitor<'v> for SeedBorrowKind<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for SeedBorrowKind<'a, 'gcx, 'tcx> {
fn visit_expr(&mut self, expr: &hir::Expr) {
match expr.node {
hir::ExprClosure(cc, _, ref body, _) => {
}
}
-impl<'a,'tcx> SeedBorrowKind<'a,'tcx> {
- fn new(fcx: &'a FnCtxt<'a,'tcx>) -> SeedBorrowKind<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> SeedBorrowKind<'a, 'gcx, 'tcx> {
+ fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>) -> SeedBorrowKind<'a, 'gcx, 'tcx> {
SeedBorrowKind { fcx: fcx, closures_with_inferred_kinds: HashSet::new() }
}
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
- self.fcx.tcx()
- }
-
- fn infcx(&self) -> &'a InferCtxt<'a,'tcx> {
- self.fcx.infcx()
- }
-
fn check_closure(&mut self,
expr: &hir::Expr,
capture_clause: hir::CaptureClause,
_body: &hir::Block)
{
- let closure_def_id = self.tcx().map.local_def_id(expr.id);
- if !self.fcx.inh.tables.borrow().closure_kinds.contains_key(&closure_def_id) {
+ let closure_def_id = self.fcx.tcx.map.local_def_id(expr.id);
+ if !self.fcx.tables.borrow().closure_kinds.contains_key(&closure_def_id) {
self.closures_with_inferred_kinds.insert(expr.id);
- self.fcx.inh.tables.borrow_mut().closure_kinds
- .insert(closure_def_id, ty::ClosureKind::Fn);
+ self.fcx.tables.borrow_mut().closure_kinds
+ .insert(closure_def_id, ty::ClosureKind::Fn);
debug!("check_closure: adding closure_id={:?} to closures_with_inferred_kinds",
closure_def_id);
}
- self.tcx().with_freevars(expr.id, |freevars| {
+ self.fcx.tcx.with_freevars(expr.id, |freevars| {
for freevar in freevars {
let var_node_id = freevar.def.var_id();
let upvar_id = ty::UpvarId { var_id: var_node_id,
}
hir::CaptureByRef => {
let origin = UpvarRegion(upvar_id, expr.span);
- let freevar_region = self.infcx().next_region_var(origin);
+ let freevar_region = self.fcx.next_region_var(origin);
let upvar_borrow = ty::UpvarBorrow { kind: ty::ImmBorrow,
region: freevar_region };
ty::UpvarCapture::ByRef(upvar_borrow)
}
};
- self.fcx.inh.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind);
+ self.fcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind);
}
});
}
///////////////////////////////////////////////////////////////////////////
// ADJUST BORROW KIND
-struct AdjustBorrowKind<'a,'tcx:'a> {
- fcx: &'a FnCtxt<'a,'tcx>,
+struct AdjustBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
closures_with_inferred_kinds: &'a HashSet<ast::NodeId>,
}
-impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> {
- fn new(fcx: &'a FnCtxt<'a,'tcx>,
+impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> {
+ fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
closures_with_inferred_kinds: &'a HashSet<ast::NodeId>)
- -> AdjustBorrowKind<'a,'tcx> {
+ -> AdjustBorrowKind<'a, 'gcx, 'tcx> {
AdjustBorrowKind { fcx: fcx, closures_with_inferred_kinds: closures_with_inferred_kinds }
}
debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id);
{
- let mut euv = euv::ExprUseVisitor::new(self, self.fcx.infcx());
+ let mut euv = euv::ExprUseVisitor::new(self, self.fcx);
euv.walk_fn(decl, body);
}
debug!("analyze_closure: id={:?} closure_substs={:?} final_upvar_tys={:?}",
id, closure_substs, final_upvar_tys);
for (&upvar_ty, final_upvar_ty) in closure_substs.upvar_tys.iter().zip(final_upvar_tys) {
- demand::eqtype(self.fcx, span, final_upvar_ty, upvar_ty);
+ self.fcx.demand_eqtype(span, final_upvar_ty, upvar_ty);
}
// Now we must process and remove any deferred resolutions,
// since we have a concrete closure kind.
- let closure_def_id = self.fcx.tcx().map.local_def_id(id);
+ let closure_def_id = self.fcx.tcx.map.local_def_id(id);
if self.closures_with_inferred_kinds.contains(&id) {
let mut deferred_call_resolutions =
self.fcx.remove_deferred_call_resolutions(closure_def_id);
// local crate or were inlined into it along with some function.
// This may change if abstract return types of some sort are
// implemented.
- let tcx = self.fcx.tcx();
+ let tcx = self.fcx.tcx;
tcx.with_freevars(closure_id, |freevars| {
freevars.iter()
.map(|freevar| {
var_id: freevar_node_id,
closure_expr_id: closure_id
};
- let capture = self.fcx.infcx().upvar_capture(upvar_id).unwrap();
+ let capture = self.fcx.upvar_capture(upvar_id).unwrap();
debug!("freevar_node_id={:?} freevar_ty={:?} capture={:?}",
freevar_node_id, freevar_ty, capture);
upvar_id);
// to move out of an upvar, this must be a FnOnce closure
- self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnOnce);
+ self.adjust_closure_kind(upvar_id.closure_expr_id,
+ ty::ClosureKind::FnOnce);
let upvar_capture_map =
- &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map;
+ &mut self.fcx.tables.borrow_mut().upvar_capture_map;
upvar_capture_map.insert(upvar_id, ty::UpvarCapture::ByValue);
}
mc::NoteClosureEnv(upvar_id) => {
// must still adjust the kind of the closure
// to be a FnOnce closure to permit moves out
// of the environment.
- self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnOnce);
+ self.adjust_closure_kind(upvar_id.closure_expr_id,
+ ty::ClosureKind::FnOnce);
}
mc::NoteNone => {
}
// borrow_kind of the upvar to make sure it
// is inferred to mutable if necessary
{
- let upvar_capture_map = &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map;
+ let upvar_capture_map = &mut self.fcx.tables.borrow_mut().upvar_capture_map;
let ub = upvar_capture_map.get_mut(&upvar_id).unwrap();
self.adjust_upvar_borrow_kind(upvar_id, ub, borrow_kind);
}
}
}
- /// We infer the borrow_kind with which to borrow upvars in a stack closure. The borrow_kind
- /// basically follows a lattice of `imm < unique-imm < mut`, moving from left to right as needed
- /// (but never right to left). Here the argument `mutbl` is the borrow_kind that is required by
+ /// We infer the borrow_kind with which to borrow upvars in a stack closure.
+ /// The borrow_kind basically follows a lattice of `imm < unique-imm < mut`,
+ /// moving from left to right as needed (but never right to left).
+ /// Here the argument `mutbl` is the borrow_kind that is required by
/// some particular use.
fn adjust_upvar_borrow_kind(&self,
upvar_id: ty::UpvarId,
return;
}
- let closure_def_id = self.fcx.tcx().map.local_def_id(closure_id);
- let closure_kinds = &mut self.fcx.inh.tables.borrow_mut().closure_kinds;
+ let closure_def_id = self.fcx.tcx.map.local_def_id(closure_id);
+ let closure_kinds = &mut self.fcx.tables.borrow_mut().closure_kinds;
let existing_kind = *closure_kinds.get(&closure_def_id).unwrap();
debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}",
}
}
-impl<'a, 'tcx, 'v> Visitor<'v> for AdjustBorrowKind<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for AdjustBorrowKind<'a, 'gcx, 'tcx> {
fn visit_fn(&mut self,
fn_kind: intravisit::FnKind<'v>,
decl: &'v hir::FnDecl,
}
}
-impl<'a,'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a,'tcx> {
+impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> {
fn consume(&mut self,
_consume_id: ast::NodeId,
_consume_span: Span,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use check::{FnCtxt, Inherited, blank_fn_ctxt, regionck};
+use check::FnCtxt;
use constrained_type_params::{identify_constrained_type_params, Parameter};
use CrateCtxt;
use hir::def_id::DefId;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt};
-use std::cell::RefCell;
use std::collections::HashSet;
use syntax::ast;
use syntax::codemap::{Span};
code: traits::ObligationCauseCode<'tcx>,
}
-impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
- pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'tcx>)
- -> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
+/// Helper type of a temporary returned by .for_item(...).
+/// Necessary because we can't write the following bound:
+/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(FnCtxt<'b, 'gcx, 'tcx>).
+struct CheckWfFcxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ inherited: super::InheritedBuilder<'a, 'gcx, 'tcx>,
+ code: traits::ObligationCauseCode<'gcx>,
+ id: ast::NodeId,
+ span: Span
+}
+
+impl<'a, 'gcx, 'tcx> CheckWfFcxBuilder<'a, 'gcx, 'tcx> {
+ fn with_fcx<F>(&'tcx mut self, f: F) where
+ F: for<'b> FnOnce(&FnCtxt<'b, 'gcx, 'tcx>,
+ &mut CheckTypeWellFormedVisitor<'b, 'gcx>) -> Vec<Ty<'tcx>>
+ {
+ let code = self.code.clone();
+ let id = self.id;
+ let span = self.span;
+ self.inherited.enter(|inh| {
+ let fcx = FnCtxt::new(&inh, ty::FnDiverging, id);
+ let wf_tys = f(&fcx, &mut CheckTypeWellFormedVisitor {
+ ccx: fcx.ccx,
+ code: code
+ });
+ fcx.select_all_obligations_or_error();
+ fcx.regionck_item(id, span, &wf_tys);
+ });
+ }
+}
+
+impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> {
+ pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'gcx>)
+ -> CheckTypeWellFormedVisitor<'ccx, 'gcx> {
CheckTypeWellFormedVisitor {
ccx: ccx,
code: traits::ObligationCauseCode::MiscObligation
}
}
- fn tcx(&self) -> &TyCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'ccx, 'gcx, 'gcx> {
self.ccx.tcx
}
}
hir::ItemStruct(ref struct_def, ref ast_generics) => {
self.check_type_defn(item, |fcx| {
- vec![struct_variant(fcx, struct_def)]
+ vec![fcx.struct_variant(struct_def)]
});
self.check_variances_for_type_defn(item, ast_generics);
}
hir::ItemEnum(ref enum_def, ref ast_generics) => {
self.check_type_defn(item, |fcx| {
- enum_variants(fcx, enum_def)
+ fcx.enum_variants(enum_def)
});
self.check_variances_for_type_defn(item, ast_generics);
fn check_trait_or_impl_item(&mut self, item_id: ast::NodeId, span: Span) {
let code = self.code.clone();
- self.with_fcx(item_id, span, |fcx, this| {
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let free_id_outlive = fcx.inh.infcx.parameter_environment.free_id_outlive;
+ self.for_id(item_id, span).with_fcx(|fcx, this| {
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let free_id_outlive = fcx.parameter_environment.free_id_outlive;
- let item = fcx.tcx().impl_or_trait_item(fcx.tcx().map.local_def_id(item_id));
+ let item = fcx.tcx.impl_or_trait_item(fcx.tcx.map.local_def_id(item_id));
let (mut implied_bounds, self_ty) = match item.container() {
- ty::TraitContainer(_) => (vec![], fcx.tcx().mk_self_type()),
- ty::ImplContainer(def_id) => (impl_implied_bounds(fcx, def_id, span),
- fcx.tcx().lookup_item_type(def_id).ty)
+ ty::TraitContainer(_) => (vec![], fcx.tcx.mk_self_type()),
+ ty::ImplContainer(def_id) => (fcx.impl_implied_bounds(def_id, span),
+ fcx.tcx.lookup_item_type(def_id).ty)
};
match item {
fcx.register_wf_obligation(ty, span, code.clone());
}
ty::MethodTraitItem(method) => {
- reject_shadowing_type_parameters(fcx.tcx(), span, &method.generics);
+ reject_shadowing_type_parameters(fcx.tcx, span, &method.generics);
let method_ty = fcx.instantiate_type_scheme(span, free_substs, &method.fty);
let predicates = fcx.instantiate_bounds(span, free_substs, &method.predicates);
this.check_fn_or_method(fcx, span, &method_ty, &predicates,
})
}
- fn with_item_fcx<F>(&mut self, item: &hir::Item, f: F) where
- F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>,
- &mut CheckTypeWellFormedVisitor<'ccx,'tcx>) -> Vec<Ty<'tcx>>,
- {
- self.with_fcx(item.id, item.span, f)
+ fn for_item<'tcx>(&self, item: &hir::Item)
+ -> CheckWfFcxBuilder<'ccx, 'gcx, 'tcx> {
+ self.for_id(item.id, item.span)
}
- fn with_fcx<F>(&mut self, id: ast::NodeId, span: Span, mut f: F) where
- F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>,
- &mut CheckTypeWellFormedVisitor<'ccx,'tcx>) -> Vec<Ty<'tcx>>,
- {
- let ccx = self.ccx;
- let param_env = ty::ParameterEnvironment::for_item(ccx.tcx, id);
- let tables = RefCell::new(ty::Tables::empty());
- let inh = Inherited::new(ccx.tcx, &tables, param_env);
- let fcx = blank_fn_ctxt(ccx, &inh, ty::FnDiverging, id);
- let wf_tys = f(&fcx, self);
- fcx.select_all_obligations_or_error();
- regionck::regionck_item(&fcx, id, span, &wf_tys);
+ fn for_id<'tcx>(&self, id: ast::NodeId, span: Span)
+ -> CheckWfFcxBuilder<'ccx, 'gcx, 'tcx> {
+ let param_env = ty::ParameterEnvironment::for_item(self.ccx.tcx, id);
+ CheckWfFcxBuilder {
+ inherited: self.ccx.inherited(Some(param_env)),
+ code: self.code.clone(),
+ id: id,
+ span: span
+ }
}
/// In a type definition, we check that the types of the fields are well-formed.
fn check_type_defn<F>(&mut self, item: &hir::Item, mut lookup_fields: F) where
- F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>) -> Vec<AdtVariant<'tcx>>,
+ F: for<'fcx, 'tcx> FnMut(&FnCtxt<'fcx, 'gcx, 'tcx>)
+ -> Vec<AdtVariant<'tcx>>
{
- self.with_item_fcx(item, |fcx, this| {
+ self.for_item(item).with_fcx(|fcx, this| {
let variants = lookup_fields(fcx);
for variant in &variants {
}
}
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let predicates = fcx.tcx().lookup_predicates(fcx.tcx().map.local_def_id(item.id));
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let predicates = fcx.tcx.lookup_predicates(fcx.tcx.map.local_def_id(item.id));
let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates);
this.check_where_clauses(fcx, item.span, &predicates);
{
let trait_def_id = self.tcx().map.local_def_id(item.id);
- if self.ccx.tcx.trait_has_default_impl(trait_def_id) {
+ if self.tcx().trait_has_default_impl(trait_def_id) {
if !items.is_empty() {
error_380(self.ccx, item.span);
}
}
- self.with_item_fcx(item, |fcx, this| {
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let predicates = fcx.tcx().lookup_predicates(trait_def_id);
+ self.for_item(item).with_fcx(|fcx, this| {
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let predicates = fcx.tcx.lookup_predicates(trait_def_id);
let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates);
this.check_where_clauses(fcx, item.span, &predicates);
vec![]
item: &hir::Item,
body: &hir::Block)
{
- self.with_item_fcx(item, |fcx, this| {
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let type_scheme = fcx.tcx().lookup_item_type(fcx.tcx().map.local_def_id(item.id));
+ self.for_item(item).with_fcx(|fcx, this| {
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let type_scheme = fcx.tcx.lookup_item_type(fcx.tcx.map.local_def_id(item.id));
let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &type_scheme.ty);
let bare_fn_ty = match item_ty.sty {
ty::TyFnDef(_, _, ref bare_fn_ty) => bare_fn_ty,
}
};
- let predicates = fcx.tcx().lookup_predicates(fcx.tcx().map.local_def_id(item.id));
+ let predicates = fcx.tcx.lookup_predicates(fcx.tcx.map.local_def_id(item.id));
let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates);
let mut implied_bounds = vec![];
- let free_id_outlive = fcx.tcx().region_maps.call_site_extent(item.id, body.id);
+ let free_id_outlive = fcx.tcx.region_maps.call_site_extent(item.id, body.id);
this.check_fn_or_method(fcx, item.span, bare_fn_ty, &predicates,
free_id_outlive, &mut implied_bounds);
implied_bounds
{
debug!("check_item_type: {:?}", item);
- self.with_item_fcx(item, |fcx, this| {
- let type_scheme = fcx.tcx().lookup_item_type(fcx.tcx().map.local_def_id(item.id));
+ self.for_item(item).with_fcx(|fcx, this| {
+ let type_scheme = fcx.tcx.lookup_item_type(fcx.tcx.map.local_def_id(item.id));
let item_ty = fcx.instantiate_type_scheme(item.span,
- &fcx.inh
- .infcx
- .parameter_environment
+ &fcx.parameter_environment
.free_substs,
&type_scheme.ty);
{
debug!("check_impl: {:?}", item);
- self.with_item_fcx(item, |fcx, this| {
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let item_def_id = fcx.tcx().map.local_def_id(item.id);
+ self.for_item(item).with_fcx(|fcx, this| {
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let item_def_id = fcx.tcx.map.local_def_id(item.id);
match *ast_trait_ref {
Some(ref ast_trait_ref) => {
- let trait_ref = fcx.tcx().impl_trait_ref(item_def_id).unwrap();
+ let trait_ref = fcx.tcx.impl_trait_ref(item_def_id).unwrap();
let trait_ref =
fcx.instantiate_type_scheme(
ast_trait_ref.path.span, free_substs, &trait_ref);
let obligations =
- ty::wf::trait_obligations(fcx.infcx(),
+ ty::wf::trait_obligations(fcx,
fcx.body_id,
&trait_ref,
ast_trait_ref.path.span);
}
}
None => {
- let self_ty = fcx.tcx().node_id_to_type(item.id);
+ let self_ty = fcx.tcx.node_id_to_type(item.id);
let self_ty = fcx.instantiate_type_scheme(item.span, free_substs, &self_ty);
fcx.register_wf_obligation(self_ty, ast_self_ty.span, this.code.clone());
}
}
- let predicates = fcx.tcx().lookup_predicates(item_def_id);
+ let predicates = fcx.tcx.lookup_predicates(item_def_id);
let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates);
this.check_where_clauses(fcx, item.span, &predicates);
- impl_implied_bounds(fcx, fcx.tcx().map.local_def_id(item.id), item.span)
+ fcx.impl_implied_bounds(fcx.tcx.map.local_def_id(item.id), item.span)
});
}
- fn check_where_clauses<'fcx>(&mut self,
- fcx: &FnCtxt<'fcx,'tcx>,
- span: Span,
- predicates: &ty::InstantiatedPredicates<'tcx>)
+ fn check_where_clauses<'fcx, 'tcx>(&mut self,
+ fcx: &FnCtxt<'fcx, 'gcx, 'tcx>,
+ span: Span,
+ predicates: &ty::InstantiatedPredicates<'tcx>)
{
let obligations =
predicates.predicates
.iter()
- .flat_map(|p| ty::wf::predicate_obligations(fcx.infcx(),
+ .flat_map(|p| ty::wf::predicate_obligations(fcx,
fcx.body_id,
p,
span));
}
}
- fn check_fn_or_method<'fcx>(&mut self,
- fcx: &FnCtxt<'fcx,'tcx>,
- span: Span,
- fty: &ty::BareFnTy<'tcx>,
- predicates: &ty::InstantiatedPredicates<'tcx>,
- free_id_outlive: CodeExtent,
- implied_bounds: &mut Vec<Ty<'tcx>>)
+ fn check_fn_or_method<'fcx, 'tcx>(&mut self,
+ fcx: &FnCtxt<'fcx, 'gcx, 'tcx>,
+ span: Span,
+ fty: &'tcx ty::BareFnTy<'tcx>,
+ predicates: &ty::InstantiatedPredicates<'tcx>,
+ free_id_outlive: CodeExtent,
+ implied_bounds: &mut Vec<Ty<'tcx>>)
{
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- let fty = fcx.instantiate_type_scheme(span, free_substs, fty);
- let sig = fcx.tcx().liberate_late_bound_regions(free_id_outlive, &fty.sig);
+ let free_substs = &fcx.parameter_environment.free_substs;
+ let fty = fcx.instantiate_type_scheme(span, free_substs, &fty);
+ let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.sig);
for &input_ty in &sig.inputs {
fcx.register_wf_obligation(input_ty, span, self.code.clone());
self.check_where_clauses(fcx, span, predicates);
}
- fn check_method_receiver<'fcx>(&mut self,
- fcx: &FnCtxt<'fcx,'tcx>,
- span: Span,
- method: &ty::Method<'tcx>,
- free_id_outlive: CodeExtent,
- self_ty: ty::Ty<'tcx>)
+ fn check_method_receiver<'fcx, 'tcx>(&mut self,
+ fcx: &FnCtxt<'fcx, 'gcx, 'tcx>,
+ span: Span,
+ method: &ty::Method<'tcx>,
+ free_id_outlive: CodeExtent,
+ self_ty: ty::Ty<'tcx>)
{
// check that the type of the method's receiver matches the
// method's first parameter.
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
+ let free_substs = &fcx.parameter_environment.free_substs;
let fty = fcx.instantiate_type_scheme(span, free_substs, &method.fty);
- let sig = fcx.tcx().liberate_late_bound_regions(free_id_outlive, &fty.sig);
+ let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.sig);
debug!("check_method_receiver({:?},cat={:?},self_ty={:?},sig={:?})",
method.name, method.explicit_self, self_ty, sig);
ty::ExplicitSelfCategory::Static => return,
ty::ExplicitSelfCategory::ByValue => self_ty,
ty::ExplicitSelfCategory::ByReference(region, mutability) => {
- fcx.tcx().mk_ref(fcx.tcx().mk_region(region), ty::TypeAndMut {
+ fcx.tcx.mk_ref(fcx.tcx.mk_region(region), ty::TypeAndMut {
ty: self_ty,
mutbl: mutability
})
}
- ty::ExplicitSelfCategory::ByBox => fcx.tcx().mk_box(self_ty)
+ ty::ExplicitSelfCategory::ByBox => fcx.tcx.mk_box(self_ty)
};
let rcvr_ty = fcx.instantiate_type_scheme(span, free_substs, &rcvr_ty);
- let rcvr_ty = fcx.tcx().liberate_late_bound_regions(free_id_outlive,
- &ty::Binder(rcvr_ty));
+ let rcvr_ty = fcx.tcx.liberate_late_bound_regions(free_id_outlive,
+ &ty::Binder(rcvr_ty));
debug!("check_method_receiver: receiver ty = {:?}", rcvr_ty);
- let _ = ::require_same_types(
- fcx.tcx(), Some(fcx.infcx()), false, span,
- sig.inputs[0], rcvr_ty,
- "mismatched method receiver");
+ fcx.require_same_types(span, sig.inputs[0], rcvr_ty,
+ "mismatched method receiver");
}
fn check_variances_for_type_defn(&self,
.map(|p| Parameter::Type(p))
.collect();
- identify_constrained_type_params(self.tcx(),
- ty_predicates.predicates.as_slice(),
+ identify_constrained_type_params(ty_predicates.predicates.as_slice(),
None,
&mut constrained_parameters);
span: Span,
param_name: ast::Name)
{
- let mut err = error_392(self.tcx(), span, param_name);
+ let mut err = error_392(self.ccx, span, param_name);
let suggested_marker_id = self.tcx().lang_items.phantom_data();
match suggested_marker_id {
}
}
-fn reject_shadowing_type_parameters<'tcx>(tcx: &TyCtxt<'tcx>,
- span: Span,
- generics: &ty::Generics<'tcx>) {
+fn reject_shadowing_type_parameters(tcx: TyCtxt, span: Span, generics: &ty::Generics) {
let impl_params = generics.types.get_slice(subst::TypeSpace).iter()
.map(|tp| tp.name).collect::<HashSet<_>>();
span: Span,
}
-fn struct_variant<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- struct_def: &hir::VariantData)
- -> AdtVariant<'tcx> {
- let fields =
- struct_def.fields().iter()
- .map(|field| {
- let field_ty = fcx.tcx().node_id_to_type(field.id);
- let field_ty = fcx.instantiate_type_scheme(field.span,
- &fcx.inh
- .infcx
- .parameter_environment
- .free_substs,
- &field_ty);
- AdtField { ty: field_ty, span: field.span }
- })
- .collect();
- AdtVariant { fields: fields }
-}
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ fn struct_variant(&self, struct_def: &hir::VariantData) -> AdtVariant<'tcx> {
+ let fields =
+ struct_def.fields().iter()
+ .map(|field| {
+ let field_ty = self.tcx.node_id_to_type(field.id);
+ let field_ty = self.instantiate_type_scheme(field.span,
+ &self.parameter_environment
+ .free_substs,
+ &field_ty);
+ AdtField { ty: field_ty, span: field.span }
+ })
+ .collect();
+ AdtVariant { fields: fields }
+ }
-fn enum_variants<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- enum_def: &hir::EnumDef)
- -> Vec<AdtVariant<'tcx>> {
- enum_def.variants.iter()
- .map(|variant| struct_variant(fcx, &variant.node.data))
- .collect()
-}
+ fn enum_variants(&self, enum_def: &hir::EnumDef) -> Vec<AdtVariant<'tcx>> {
+ enum_def.variants.iter()
+ .map(|variant| self.struct_variant(&variant.node.data))
+ .collect()
+ }
-fn impl_implied_bounds<'fcx,'tcx>(fcx: &FnCtxt<'fcx, 'tcx>,
- impl_def_id: DefId,
- span: Span)
- -> Vec<Ty<'tcx>>
-{
- let free_substs = &fcx.inh.infcx.parameter_environment.free_substs;
- match fcx.tcx().impl_trait_ref(impl_def_id) {
- Some(ref trait_ref) => {
- // Trait impl: take implied bounds from all types that
- // appear in the trait reference.
- let trait_ref = fcx.instantiate_type_scheme(span, free_substs, trait_ref);
- trait_ref.substs.types.as_slice().to_vec()
- }
+ fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec<Ty<'tcx>> {
+ let free_substs = &self.parameter_environment.free_substs;
+ match self.tcx.impl_trait_ref(impl_def_id) {
+ Some(ref trait_ref) => {
+ // Trait impl: take implied bounds from all types that
+ // appear in the trait reference.
+ let trait_ref = self.instantiate_type_scheme(span, free_substs, trait_ref);
+ trait_ref.substs.types.as_slice().to_vec()
+ }
- None => {
- // Inherent impl: take implied bounds from the self type.
- let self_ty = fcx.tcx().lookup_item_type(impl_def_id).ty;
- let self_ty = fcx.instantiate_type_scheme(span, free_substs, &self_ty);
- vec![self_ty]
+ None => {
+ // Inherent impl: take implied bounds from the self type.
+ let self_ty = self.tcx.lookup_item_type(impl_def_id).ty;
+ let self_ty = self.instantiate_type_scheme(span, free_substs, &self_ty);
+ vec![self_ty]
+ }
}
}
}
-pub fn error_192<'ccx,'tcx>(ccx: &'ccx CrateCtxt<'ccx, 'tcx>, span: Span) {
+fn error_192(ccx: &CrateCtxt, span: Span) {
span_err!(ccx.tcx.sess, span, E0192,
"negative impls are only allowed for traits with \
default impls (e.g., `Send` and `Sync`)")
}
-pub fn error_380<'ccx,'tcx>(ccx: &'ccx CrateCtxt<'ccx, 'tcx>, span: Span) {
+fn error_380(ccx: &CrateCtxt, span: Span) {
span_err!(ccx.tcx.sess, span, E0380,
"traits with default impls (`e.g. unsafe impl \
Trait for ..`) must have no methods or associated items")
}
-pub fn error_392<'tcx>(tcx: &TyCtxt<'tcx>, span: Span, param_name: ast::Name)
+fn error_392<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, span: Span, param_name: ast::Name)
-> DiagnosticBuilder<'tcx> {
- struct_span_err!(tcx.sess, span, E0392,
+ struct_span_err!(ccx.tcx.sess, span, E0392,
"parameter `{}` is never used", param_name)
}
-pub fn error_194<'tcx>(tcx: &TyCtxt<'tcx>, span: Span, name: ast::Name) {
+fn error_194(tcx: TyCtxt, span: Span, name: ast::Name) {
span_err!(tcx.sess, span, E0194,
"type parameter `{}` shadows another type parameter of the same name",
name);
use rustc::ty::{self, Ty, TyCtxt, MethodCall, MethodCallee};
use rustc::ty::adjustment;
use rustc::ty::fold::{TypeFolder,TypeFoldable};
-use rustc::infer;
+use rustc::infer::{InferCtxt, FixupError};
use write_substs_to_tcx;
use write_ty_to_tcx;
///////////////////////////////////////////////////////////////////////////
// Entry point functions
-pub fn resolve_type_vars_in_expr(fcx: &FnCtxt, e: &hir::Expr) {
- assert_eq!(fcx.writeback_errors.get(), false);
- let mut wbcx = WritebackCx::new(fcx);
- wbcx.visit_expr(e);
- wbcx.visit_upvar_borrow_map();
- wbcx.visit_closures();
- wbcx.visit_liberated_fn_sigs();
- wbcx.visit_fru_field_types();
-}
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ pub fn resolve_type_vars_in_expr(&self, e: &hir::Expr) {
+ assert_eq!(self.writeback_errors.get(), false);
+ let mut wbcx = WritebackCx::new(self);
+ wbcx.visit_expr(e);
+ wbcx.visit_upvar_borrow_map();
+ wbcx.visit_closures();
+ wbcx.visit_liberated_fn_sigs();
+ wbcx.visit_fru_field_types();
+ }
-pub fn resolve_type_vars_in_fn(fcx: &FnCtxt,
- decl: &hir::FnDecl,
- blk: &hir::Block) {
- assert_eq!(fcx.writeback_errors.get(), false);
- let mut wbcx = WritebackCx::new(fcx);
- wbcx.visit_block(blk);
- for arg in &decl.inputs {
- wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id);
- wbcx.visit_pat(&arg.pat);
-
- // Privacy needs the type for the whole pattern, not just each binding
- if !pat_util::pat_is_binding(&fcx.tcx().def_map.borrow(), &arg.pat) {
- wbcx.visit_node_id(ResolvingPattern(arg.pat.span),
- arg.pat.id);
+ pub fn resolve_type_vars_in_fn(&self, decl: &hir::FnDecl, blk: &hir::Block) {
+ assert_eq!(self.writeback_errors.get(), false);
+ let mut wbcx = WritebackCx::new(self);
+ wbcx.visit_block(blk);
+ for arg in &decl.inputs {
+ wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id);
+ wbcx.visit_pat(&arg.pat);
+
+ // Privacy needs the type for the whole pattern, not just each binding
+ if !pat_util::pat_is_binding(&self.tcx.def_map.borrow(), &arg.pat) {
+ wbcx.visit_node_id(ResolvingPattern(arg.pat.span),
+ arg.pat.id);
+ }
}
+ wbcx.visit_upvar_borrow_map();
+ wbcx.visit_closures();
+ wbcx.visit_liberated_fn_sigs();
+ wbcx.visit_fru_field_types();
}
- wbcx.visit_upvar_borrow_map();
- wbcx.visit_closures();
- wbcx.visit_liberated_fn_sigs();
- wbcx.visit_fru_field_types();
}
///////////////////////////////////////////////////////////////////////////
// there, it applies a few ad-hoc checks that were not convenient to
// do elsewhere.
-struct WritebackCx<'cx, 'tcx: 'cx> {
- fcx: &'cx FnCtxt<'cx, 'tcx>,
+struct WritebackCx<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
+ fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>,
}
-impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
- fn new(fcx: &'cx FnCtxt<'cx, 'tcx>) -> WritebackCx<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>) -> WritebackCx<'cx, 'gcx, 'tcx> {
WritebackCx { fcx: fcx }
}
- fn tcx(&self) -> &'cx TyCtxt<'tcx> {
- self.fcx.tcx()
+ fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
+ self.fcx.tcx
}
// Hacky hack: During type-checking, we treat *all* operators
hir::ExprBinary(ref op, ref lhs, ref rhs) |
hir::ExprAssignOp(ref op, ref lhs, ref rhs) => {
let lhs_ty = self.fcx.node_ty(lhs.id);
- let lhs_ty = self.fcx.infcx().resolve_type_vars_if_possible(&lhs_ty);
+ let lhs_ty = self.fcx.resolve_type_vars_if_possible(&lhs_ty);
let rhs_ty = self.fcx.node_ty(rhs.id);
- let rhs_ty = self.fcx.infcx().resolve_type_vars_if_possible(&rhs_ty);
+ let rhs_ty = self.fcx.resolve_type_vars_if_possible(&rhs_ty);
if lhs_ty.is_scalar() && rhs_ty.is_scalar() {
- self.fcx.inh.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id));
+ self.fcx.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id));
// weird but true: the by-ref binops put an
// adjustment on the lhs but not the rhs; the
match e.node {
hir::ExprBinary(..) => {
if !op.node.is_by_value() {
- self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id);
+ self.fcx.tables.borrow_mut().adjustments.remove(&lhs.id);
}
},
hir::ExprAssignOp(..) => {
- self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id);
+ self.fcx.tables.borrow_mut().adjustments.remove(&lhs.id);
},
_ => {},
}
// below. In general, a function is made into a `visitor` if it must
// traffic in node-ids or update tables in the type context etc.
-impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'gcx, 'tcx> {
fn visit_stmt(&mut self, s: &hir::Stmt) {
if self.fcx.writeback_errors.get() {
return;
let var_ty = self.fcx.local_ty(l.span, l.id);
let var_ty = self.resolve(&var_ty, ResolvingLocal(l.span));
- write_ty_to_tcx(self.tcx(), l.id, var_ty);
+ write_ty_to_tcx(self.fcx.ccx, l.id, var_ty);
intravisit::walk_local(self, l);
}
match t.node {
hir::TyFixedLengthVec(ref ty, ref count_expr) => {
self.visit_ty(&ty);
- write_ty_to_tcx(self.tcx(), count_expr.id, self.tcx().types.usize);
+ write_ty_to_tcx(self.fcx.ccx, count_expr.id, self.tcx().types.usize);
}
hir::TyBareFn(ref function_declaration) => {
intravisit::walk_fn_decl_nopat(self, &function_declaration.decl);
}
}
-impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> {
fn visit_upvar_borrow_map(&self) {
if self.fcx.writeback_errors.get() {
return;
}
- for (upvar_id, upvar_capture) in self.fcx.inh.tables.borrow().upvar_capture_map.iter() {
+ for (upvar_id, upvar_capture) in self.fcx.tables.borrow().upvar_capture_map.iter() {
let new_upvar_capture = match *upvar_capture {
ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue,
ty::UpvarCapture::ByRef(ref upvar_borrow) => {
debug!("Upvar capture for {:?} resolved to {:?}",
upvar_id,
new_upvar_capture);
- self.fcx.tcx()
- .tables
- .borrow_mut()
- .upvar_capture_map
- .insert(*upvar_id, new_upvar_capture);
+ self.tcx()
+ .tables
+ .borrow_mut()
+ .upvar_capture_map
+ .insert(*upvar_id, new_upvar_capture);
}
}
return
}
- for (def_id, closure_ty) in self.fcx.inh.tables.borrow().closure_tys.iter() {
+ for (def_id, closure_ty) in self.fcx.tables.borrow().closure_tys.iter() {
let closure_ty = self.resolve(closure_ty, ResolvingClosure(*def_id));
- self.fcx.tcx().tables.borrow_mut().closure_tys.insert(*def_id, closure_ty);
+ self.tcx().tables.borrow_mut().closure_tys.insert(*def_id, closure_ty);
}
- for (def_id, &closure_kind) in self.fcx.inh.tables.borrow().closure_kinds.iter() {
- self.fcx.tcx().tables.borrow_mut().closure_kinds.insert(*def_id, closure_kind);
+ for (def_id, &closure_kind) in self.fcx.tables.borrow().closure_kinds.iter() {
+ self.tcx().tables.borrow_mut().closure_kinds.insert(*def_id, closure_kind);
}
}
// Resolve the type of the node with id `id`
let n_ty = self.fcx.node_ty(id);
let n_ty = self.resolve(&n_ty, reason);
- write_ty_to_tcx(self.tcx(), id, n_ty);
+ write_ty_to_tcx(self.fcx.ccx, id, n_ty);
debug!("Node {} has type {:?}", id, n_ty);
// Resolve any substitutions
self.fcx.opt_node_ty_substs(id, |item_substs| {
- write_substs_to_tcx(self.tcx(), id,
+ write_substs_to_tcx(self.fcx.ccx, id,
self.resolve(item_substs, reason));
});
}
fn visit_adjustments(&self, reason: ResolveReason, id: ast::NodeId) {
- let adjustments = self.fcx.inh.tables.borrow_mut().adjustments.remove(&id);
+ let adjustments = self.fcx.tables.borrow_mut().adjustments.remove(&id);
match adjustments {
None => {
debug!("No adjustments for node {}", id);
reason: ResolveReason,
method_call: MethodCall) {
// Resolve any method map entry
- let new_method = match self.fcx.inh.tables.borrow_mut().method_map.remove(&method_call) {
+ let new_method = match self.fcx.tables.borrow_mut().method_map.remove(&method_call) {
Some(method) => {
debug!("writeback::resolve_method_map_entry(call={:?}, entry={:?})",
method_call,
let new_method = MethodCallee {
def_id: method.def_id,
ty: self.resolve(&method.ty, reason),
- substs: self.tcx().mk_substs(self.resolve(method.substs, reason)),
+ substs: self.resolve(&method.substs, reason),
};
Some(new_method)
}
fn visit_liberated_fn_sigs(&self) {
- for (&node_id, fn_sig) in self.fcx.inh.tables.borrow().liberated_fn_sigs.iter() {
+ for (&node_id, fn_sig) in self.fcx.tables.borrow().liberated_fn_sigs.iter() {
let fn_sig = self.resolve(fn_sig, ResolvingFnSig(node_id));
self.tcx().tables.borrow_mut().liberated_fn_sigs.insert(node_id, fn_sig.clone());
}
}
fn visit_fru_field_types(&self) {
- for (&node_id, ftys) in self.fcx.inh.tables.borrow().fru_field_types.iter() {
+ for (&node_id, ftys) in self.fcx.tables.borrow().fru_field_types.iter() {
let ftys = self.resolve(ftys, ResolvingFieldTypes(node_id));
self.tcx().tables.borrow_mut().fru_field_types.insert(node_id, ftys);
}
}
- fn resolve<T:TypeFoldable<'tcx>>(&self, t: &T, reason: ResolveReason) -> T {
- t.fold_with(&mut Resolver::new(self.fcx, reason))
+ fn resolve<T>(&self, x: &T, reason: ResolveReason) -> T::Lifted
+ where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
+ {
+ let x = x.fold_with(&mut Resolver::new(self.fcx, reason));
+ if let Some(lifted) = self.tcx().lift_to_global(&x) {
+ lifted
+ } else {
+ span_bug!(reason.span(self.tcx()),
+ "writeback: `{:?}` missing from the global type context", x);
+ }
}
}
ResolvingFieldTypes(ast::NodeId)
}
-impl ResolveReason {
- fn span(&self, tcx: &TyCtxt) -> Span {
+impl<'a, 'gcx, 'tcx> ResolveReason {
+ fn span(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Span {
match *self {
ResolvingExpr(s) => s,
ResolvingLocal(s) => s,
// The Resolver. This is the type folding engine that detects
// unresolved types and so forth.
-struct Resolver<'cx, 'tcx: 'cx> {
- tcx: &'cx TyCtxt<'tcx>,
- infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
+struct Resolver<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
+ tcx: TyCtxt<'cx, 'gcx, 'tcx>,
+ infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason,
}
-impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
- fn new(fcx: &'cx FnCtxt<'cx, 'tcx>,
+impl<'cx, 'gcx, 'tcx> Resolver<'cx, 'gcx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>,
reason: ResolveReason)
- -> Resolver<'cx, 'tcx>
+ -> Resolver<'cx, 'gcx, 'tcx>
{
- Resolver::from_infcx(fcx.infcx(), &fcx.writeback_errors, reason)
+ Resolver::from_infcx(fcx, &fcx.writeback_errors, reason)
}
- fn from_infcx(infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
+ fn from_infcx(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason)
- -> Resolver<'cx, 'tcx>
+ -> Resolver<'cx, 'gcx, 'tcx>
{
Resolver { infcx: infcx,
tcx: infcx.tcx,
reason: reason }
}
- fn report_error(&self, e: infer::FixupError) {
+ fn report_error(&self, e: FixupError) {
self.writeback_errors.set(true);
if !self.tcx.sess.has_errors() {
match self.reason {
ResolvingExpr(span) => {
span_err!(self.tcx.sess, span, E0101,
- "cannot determine a type for this expression: {}",
- infer::fixup_err_to_string(e));
+ "cannot determine a type for this expression: {}", e);
}
ResolvingLocal(span) => {
span_err!(self.tcx.sess, span, E0102,
- "cannot determine a type for this local variable: {}",
- infer::fixup_err_to_string(e));
+ "cannot determine a type for this local variable: {}", e);
}
ResolvingPattern(span) => {
span_err!(self.tcx.sess, span, E0103,
- "cannot determine a type for this pattern binding: {}",
- infer::fixup_err_to_string(e));
+ "cannot determine a type for this pattern binding: {}", e);
}
ResolvingUpvar(upvar_id) => {
let span = self.reason.span(self.tcx);
span_err!(self.tcx.sess, span, E0104,
"cannot resolve lifetime for captured variable `{}`: {}",
- self.tcx.local_var_name_str(upvar_id.var_id).to_string(),
- infer::fixup_err_to_string(e));
+ self.tcx.local_var_name_str(upvar_id.var_id), e);
}
ResolvingClosure(_) => {
}
}
-impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
- fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Resolver<'cx, 'gcx, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.tcx
}
use rustc::hir::intravisit::Visitor;
struct UnusedTraitImportVisitor<'a, 'tcx: 'a> {
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx> UnusedTraitImportVisitor<'a, 'tcx> {
}
}
-pub fn check_crate(tcx: &TyCtxt) {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _task = tcx.dep_graph.in_task(DepNode::UnusedTraitCheck);
let mut visitor = UnusedTraitImportVisitor { tcx: tcx };
tcx.map.krate().visit_all_items(&mut visitor);
use rustc::ty::util::CopyImplementationError;
use middle::free_region::FreeRegionMap;
use CrateCtxt;
-use rustc::infer::{self, InferCtxt, TypeOrigin, new_infer_ctxt};
+use rustc::infer::{self, InferCtxt, TypeOrigin};
use std::cell::RefCell;
use std::rc::Rc;
use syntax::codemap::Span;
mod overlap;
mod unsafety;
-// Returns the def ID of the base type, if there is one.
-fn get_base_type_def_id<'a, 'tcx>(inference_context: &InferCtxt<'a, 'tcx>,
- span: Span,
- ty: Ty<'tcx>)
- -> Option<DefId> {
- match ty.sty {
- TyEnum(def, _) |
- TyStruct(def, _) => {
- Some(def.did)
- }
-
- TyTrait(ref t) => {
- Some(t.principal_def_id())
- }
-
- TyBox(_) => {
- inference_context.tcx.lang_items.owned_box()
- }
-
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) |
- TyTuple(..) | TyParam(..) | TyError |
- TyRawPtr(_) | TyRef(_, _) | TyProjection(..) => {
- None
- }
-
- TyInfer(..) | TyClosure(..) => {
- // `ty` comes from a user declaration so we should only expect types
- // that the user can type
- span_bug!(
- span,
- "coherence encountered unexpected type searching for base type: {}",
- ty);
- }
- }
-}
-
-struct CoherenceChecker<'a, 'tcx: 'a> {
- crate_context: &'a CrateCtxt<'a, 'tcx>,
- inference_context: InferCtxt<'a, 'tcx>,
+struct CoherenceChecker<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ crate_context: &'a CrateCtxt<'a, 'gcx>,
+ inference_context: InferCtxt<'a, 'gcx, 'tcx>,
inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<DefId>>>>>,
}
-struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
- cc: &'a CoherenceChecker<'a, 'tcx>
+struct CoherenceCheckVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'gcx, 'tcx>
}
-impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx, 'v> intravisit::Visitor<'v> for CoherenceCheckVisitor<'a, 'gcx, 'tcx> {
fn visit_item(&mut self, item: &Item) {
if let ItemImpl(..) = item.node {
self.cc.check_implementation(item)
}
}
-impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> {
+
+ // Returns the def ID of the base type, if there is one.
+ fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option<DefId> {
+ match ty.sty {
+ TyEnum(def, _) |
+ TyStruct(def, _) => {
+ Some(def.did)
+ }
+
+ TyTrait(ref t) => {
+ Some(t.principal_def_id())
+ }
+
+ TyBox(_) => {
+ self.inference_context.tcx.lang_items.owned_box()
+ }
+
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+ TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) |
+ TyTuple(..) | TyParam(..) | TyError |
+ TyRawPtr(_) | TyRef(_, _) | TyProjection(..) => {
+ None
+ }
+
+ TyInfer(..) | TyClosure(..) => {
+ // `ty` comes from a user declaration so we should only expect types
+ // that the user can type
+ span_bug!(
+ span,
+ "coherence encountered unexpected type searching for base type: {}",
+ ty);
+ }
+ }
+ }
+
fn check(&self) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
- if let Some(base_type_def_id) = get_base_type_def_id(
- &self.inference_context, item.span, self_type.ty) {
- self.add_inherent_impl(base_type_def_id, impl_did);
+ if let Some(base_def_id) = self.get_base_type_def_id(item.span, self_type.ty) {
+ self.add_inherent_impl(base_def_id, impl_did);
}
}
Rc::new(RefCell::new(vec!(impl_def_id))));
}
- fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'tcx>, impl_def_id: DefId) {
+ fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'gcx>, impl_def_id: DefId) {
debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}",
impl_trait_ref, impl_def_id);
let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id);
debug!("check_implementations_of_copy: self_type={:?} (free)",
self_type);
- match param_env.can_type_implement_copy(self_type, span) {
+ match param_env.can_type_implement_copy(tcx, self_type, span) {
Ok(()) => {}
Err(CopyImplementationError::InfrigingField(name)) => {
span_err!(tcx.sess, span, E0204,
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)",
source, target);
- let infcx = new_infer_ctxt(tcx, &tcx.tables, Some(param_env), ProjectionMode::Topmost);
+ tcx.infer_ctxt(None, Some(param_env), ProjectionMode::Topmost).enter(|infcx| {
+ let origin = TypeOrigin::Misc(span);
+ let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>, mt_b: ty::TypeAndMut<'gcx>,
+ mk_ptr: &Fn(Ty<'gcx>) -> Ty<'gcx>| {
+ if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) {
+ infcx.report_mismatched_types(origin, mk_ptr(mt_b.ty),
+ target, ty::error::TypeError::Mutability);
+ }
+ (mt_a.ty, mt_b.ty, unsize_trait, None)
+ };
+ let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) {
+ (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None),
+
+ (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => {
+ infcx.sub_regions(infer::RelateObjectBound(span), *r_b, *r_a);
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
+ }
- let origin = TypeOrigin::Misc(span);
- let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, mt_b: ty::TypeAndMut<'tcx>,
- mk_ptr: &Fn(Ty<'tcx>) -> Ty<'tcx>| {
- if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) {
- infcx.report_mismatched_types(origin, mk_ptr(mt_b.ty),
- target, ty::error::TypeError::Mutability);
- }
- (mt_a.ty, mt_b.ty, unsize_trait, None)
- };
- let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) {
- (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None),
+ (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) |
+ (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => {
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
+ }
- (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => {
- infer::mk_subr(&infcx, infer::RelateObjectBound(span), *r_b, *r_a);
- check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
- }
+ (&ty::TyStruct(def_a, substs_a), &ty::TyStruct(def_b, substs_b)) => {
+ if def_a != def_b {
+ let source_path = tcx.item_path_str(def_a.did);
+ let target_path = tcx.item_path_str(def_b.did);
+ span_err!(tcx.sess, span, E0377,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected {}, found {}",
+ source_path, target_path);
+ return;
+ }
- (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) |
- (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => {
- check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
- }
+ let fields = &def_a.struct_variant().fields;
+ let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| {
+ let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
+
+ if f.unsubst_ty().is_phantom_data() {
+ // Ignore PhantomData fields
+ None
+ } else if infcx.sub_types(false, origin, b, a).is_ok() {
+ // Ignore fields that aren't significantly changed
+ None
+ } else {
+ // Collect up all fields that were significantly changed
+ // i.e. those that contain T in coerce_unsized T -> U
+ Some((i, a, b))
+ }
+ }).collect::<Vec<_>>();
+
+ if diff_fields.is_empty() {
+ span_err!(tcx.sess, span, E0374,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with one field \
+ being coerced, none found");
+ return;
+ } else if diff_fields.len() > 1 {
+ span_err!(tcx.sess, span, E0375,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with one field \
+ being coerced, but {} fields need coercions: {}",
+ diff_fields.len(), diff_fields.iter().map(|&(i, a, b)| {
+ format!("{} ({} to {})", fields[i].name, a, b)
+ }).collect::<Vec<_>>().join(", "));
+ return;
+ }
- (&ty::TyStruct(def_a, substs_a), &ty::TyStruct(def_b, substs_b)) => {
- if def_a != def_b {
- let source_path = tcx.item_path_str(def_a.did);
- let target_path = tcx.item_path_str(def_b.did);
- span_err!(tcx.sess, span, E0377,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with the same \
- definition; expected {}, found {}",
- source_path, target_path);
- return;
+ let (i, a, b) = diff_fields[0];
+ let kind = ty::adjustment::CustomCoerceUnsized::Struct(i);
+ (a, b, coerce_unsized_trait, Some(kind))
}
- let fields = &def_a.struct_variant().fields;
- let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| {
- let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
-
- if f.unsubst_ty().is_phantom_data() {
- // Ignore PhantomData fields
- None
- } else if infcx.sub_types(false, origin, b, a).is_ok() {
- // Ignore fields that aren't significantly changed
- None
- } else {
- // Collect up all fields that were significantly changed
- // i.e. those that contain T in coerce_unsized T -> U
- Some((i, a, b))
- }
- }).collect::<Vec<_>>();
-
- if diff_fields.is_empty() {
- span_err!(tcx.sess, span, E0374,
+ _ => {
+ span_err!(tcx.sess, span, E0376,
"the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with one field \
- being coerced, none found");
- return;
- } else if diff_fields.len() > 1 {
- span_err!(tcx.sess, span, E0375,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with one field \
- being coerced, but {} fields need coercions: {}",
- diff_fields.len(), diff_fields.iter().map(|&(i, a, b)| {
- format!("{} ({} to {})", fields[i].name, a, b)
- }).collect::<Vec<_>>().join(", "));
+ for a coercion between structures");
return;
}
+ };
- let (i, a, b) = diff_fields[0];
- let kind = ty::adjustment::CustomCoerceUnsized::Struct(i);
- (a, b, coerce_unsized_trait, Some(kind))
- }
-
- _ => {
- span_err!(tcx.sess, span, E0376,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures");
- return;
- }
- };
-
- let mut fulfill_cx = traits::FulfillmentContext::new();
+ let mut fulfill_cx = traits::FulfillmentContext::new();
- // Register an obligation for `A: Trait<B>`.
- let cause = traits::ObligationCause::misc(span, impl_node_id);
- let predicate = traits::predicate_for_trait_def(tcx, cause, trait_def_id,
- 0, source, vec![target]);
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ // Register an obligation for `A: Trait<B>`.
+ let cause = traits::ObligationCause::misc(span, impl_node_id);
+ let predicate = tcx.predicate_for_trait_def(cause, trait_def_id, 0,
+ source, vec![target]);
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
- // Check that all transitive obligations are satisfied.
- if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) {
- traits::report_fulfillment_errors(&infcx, &errors);
- }
+ // Check that all transitive obligations are satisfied.
+ if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) {
+ infcx.report_fulfillment_errors(&errors);
+ }
- // Finally, resolve all regions.
- let mut free_regions = FreeRegionMap::new();
- free_regions.relate_free_regions_from_predicates(tcx, &infcx.parameter_environment
- .caller_bounds);
- infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id);
+ // Finally, resolve all regions.
+ let mut free_regions = FreeRegionMap::new();
+ free_regions.relate_free_regions_from_predicates(
+ &infcx.parameter_environment.caller_bounds);
+ infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id);
- if let Some(kind) = kind {
- tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind);
- }
+ if let Some(kind) = kind {
+ tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind);
+ }
+ });
});
}
}
-fn enforce_trait_manually_implementable(tcx: &TyCtxt, sp: Span, trait_def_id: DefId) {
+fn enforce_trait_manually_implementable(tcx: TyCtxt, sp: Span, trait_def_id: DefId) {
if tcx.sess.features.borrow().unboxed_closures {
// the feature gate allows all of them
return
err.emit();
}
-pub fn check_coherence(crate_context: &CrateCtxt) {
- let _task = crate_context.tcx.dep_graph.in_task(DepNode::Coherence);
- let infcx = new_infer_ctxt(crate_context.tcx,
- &crate_context.tcx.tables,
- None,
- ProjectionMode::Topmost);
- CoherenceChecker {
- crate_context: crate_context,
- inference_context: infcx,
- inherent_impls: RefCell::new(FnvHashMap()),
- }.check();
- unsafety::check(crate_context.tcx);
- orphan::check(crate_context.tcx);
- overlap::check(crate_context.tcx);
+pub fn check_coherence(ccx: &CrateCtxt) {
+ let _task = ccx.tcx.dep_graph.in_task(DepNode::Coherence);
+ ccx.tcx.infer_ctxt(None, None, ProjectionMode::Topmost).enter(|infcx| {
+ CoherenceChecker {
+ crate_context: ccx,
+ inference_context: infcx,
+ inherent_impls: RefCell::new(FnvHashMap()),
+ }.check();
+ });
+ unsafety::check(ccx.tcx);
+ orphan::check(ccx.tcx);
+ overlap::check(ccx.tcx);
}
use rustc::hir::intravisit;
use rustc::hir;
-pub fn check(tcx: &TyCtxt) {
+pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = OrphanChecker { tcx: tcx };
tcx.visit_all_items_in_krate(DepNode::CoherenceOrphanCheck, &mut orphan);
}
struct OrphanChecker<'cx, 'tcx:'cx> {
- tcx: &'cx TyCtxt<'tcx>
+ tcx: TyCtxt<'cx, 'tcx, 'tcx>
}
impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> {
use hir::def_id::DefId;
use rustc::traits::{self, ProjectionMode};
-use rustc::infer;
use rustc::ty::{self, TyCtxt};
use syntax::ast;
use rustc::dep_graph::DepNode;
use util::nodemap::DefIdMap;
use lint;
-pub fn check(tcx: &TyCtxt) {
+pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut overlap = OverlapChecker { tcx: tcx,
default_impls: DefIdMap() };
}
struct OverlapChecker<'cx, 'tcx:'cx> {
- tcx: &'cx TyCtxt<'tcx>,
+ tcx: TyCtxt<'cx, 'tcx, 'tcx>,
// maps from a trait def-id to an impl id
default_impls: DefIdMap<ast::NodeId>,
#[derive(Copy, Clone, PartialEq)]
enum Namespace { Type, Value }
- fn name_and_namespace(tcx: &TyCtxt, item: &ty::ImplOrTraitItemId)
- -> (ast::Name, Namespace)
+ fn name_and_namespace<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ item: &ty::ImplOrTraitItemId)
+ -> (ast::Name, Namespace)
{
let name = tcx.impl_or_trait_item(item.def_id()).name();
(name, match *item {
let impl_items = self.tcx.impl_items.borrow();
for item1 in &impl_items[&impl1] {
- let (name, namespace) = name_and_namespace(&self.tcx, item1);
+ let (name, namespace) = name_and_namespace(self.tcx, item1);
for item2 in &impl_items[&impl2] {
- if (name, namespace) == name_and_namespace(&self.tcx, item2) {
+ if (name, namespace) == name_and_namespace(self.tcx, item2) {
let msg = format!("duplicate definitions with name `{}`", name);
let node_id = self.tcx.map.as_local_node_id(item1.def_id()).unwrap();
self.tcx.sess.add_lint(lint::builtin::OVERLAPPING_INHERENT_IMPLS,
for (i, &impl1_def_id) in impls.iter().enumerate() {
for &impl2_def_id in &impls[(i+1)..] {
- let infcx = infer::new_infer_ctxt(self.tcx,
- &self.tcx.tables,
- None,
- ProjectionMode::Topmost);
- if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() {
- self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id)
- }
+ self.tcx.infer_ctxt(None, None, ProjectionMode::Topmost).enter(|infcx| {
+ if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() {
+ self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id)
+ }
+ });
}
}
}
// insertion failed due to overlap
if let Err(overlap) = insert_result {
- // only print the Self type if it has at least some outer
- // concrete shell; otherwise, it's not adding much
- // information.
- let self_type = {
- overlap.on_trait_ref.substs.self_ty().and_then(|ty| {
- if ty.has_concrete_skeleton() {
- Some(format!(" for type `{}`", ty))
- } else {
- None
- }
- }).unwrap_or(String::new())
- };
-
let mut err = struct_span_err!(
self.tcx.sess, self.tcx.span_of_impl(impl_def_id).unwrap(), E0119,
"conflicting implementations of trait `{}`{}:",
- overlap.on_trait_ref,
- self_type);
+ overlap.trait_desc,
+ overlap.self_desc.map_or(String::new(),
+ |ty| format!(" for type `{}`", ty)));
match self.tcx.span_of_impl(overlap.with_impl) {
Ok(span) => {
// This is something like impl Trait1 for Trait2. Illegal
// if Trait1 is a supertrait of Trait2 or Trait2 is not object safe.
- if !traits::is_object_safe(self.tcx, data.principal_def_id()) {
+ if !self.tcx.is_object_safe(data.principal_def_id()) {
// This is an error, but it will be
// reported by wfcheck. Ignore it
// here. This is tested by
use rustc::hir::intravisit;
use rustc::hir;
-pub fn check(tcx: &TyCtxt) {
+pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut orphan = UnsafetyChecker { tcx: tcx };
tcx.map.krate().visit_all_items(&mut orphan);
}
struct UnsafetyChecker<'cx, 'tcx:'cx> {
- tcx: &'cx TyCtxt<'tcx>
+ tcx: TyCtxt<'cx, 'tcx, 'tcx>
}
impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> {
*/
-use astconv::{self, AstConv, ty_of_arg, ast_ty_to_ty, ast_region_to_region};
+use astconv::{AstConv, ast_region_to_region, Bounds, PartitionedBounds, partition_bounds};
use lint;
use hir::def::Def;
use hir::def_id::DefId;
use rustc::hir::map as hir_map;
use util::common::{ErrorReported, MemoizationMap};
use util::nodemap::FnvHashMap;
-use write_ty_to_tcx;
+use {CrateCtxt, write_ty_to_tcx};
use rustc_const_math::ConstInt;
-use std::cell::RefCell;
use std::collections::HashSet;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::rc::Rc;
-use syntax::abi;
-use syntax::ast;
-use syntax::attr;
+use syntax::{abi, ast, attr};
use syntax::codemap::Span;
use syntax::parse::token::keywords;
use syntax::ptr::P;
///////////////////////////////////////////////////////////////////////////
// Main entry point
-pub fn collect_item_types(tcx: &TyCtxt) {
- let ccx = &CrateCtxt { tcx: tcx, stack: RefCell::new(Vec::new()) };
- let mut visitor = CollectItemTypesVisitor{ ccx: ccx };
+pub fn collect_item_types(ccx: &CrateCtxt) {
+ let mut visitor = CollectItemTypesVisitor { ccx: ccx };
ccx.tcx.visit_all_items_in_krate(DepNode::CollectItem, &mut visitor);
}
///////////////////////////////////////////////////////////////////////////
-struct CrateCtxt<'a,'tcx:'a> {
- tcx: &'a TyCtxt<'tcx>,
-
- // This stack is used to identify cycles in the user's source.
- // Note that these cycles can cross multiple items.
- stack: RefCell<Vec<AstConvRequest>>,
-}
-
/// Context specific to some particular item. This is what implements
/// AstConv. It has information about the predicates that are defined
/// on the trait. Unfortunately, this predicate information is
}
#[derive(Copy, Clone, PartialEq, Eq)]
-enum AstConvRequest {
+pub enum AstConvRequest {
GetItemTypeScheme(DefId),
GetTraitDef(DefId),
EnsureSuperPredicates(DefId),
impl<'a,'tcx> ItemCtxt<'a,'tcx> {
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &hir::Ty) -> Ty<'tcx> {
- ast_ty_to_ty(self, rs, ast_ty)
+ AstConv::ast_ty_to_ty(self, rs, ast_ty)
}
}
-impl<'a, 'tcx> AstConv<'tcx> for ItemCtxt<'a, 'tcx> {
- fn tcx(&self) -> &TyCtxt<'tcx> { self.ccx.tcx }
+impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { self.ccx.tcx }
fn get_item_type_scheme(&self, span: Span, id: DefId)
-> Result<ty::TypeScheme<'tcx>, ErrorReported>
}
}
- fn ty_infer(&self,
- _ty_param_def: Option<ty::TypeParameterDef<'tcx>>,
- _substs: Option<&mut Substs<'tcx>>,
- _space: Option<ParamSpace>,
- span: Span) -> Ty<'tcx> {
+ fn get_free_substs(&self) -> Option<&Substs<'tcx>> {
+ None
+ }
+
+ fn ty_infer(&self,
+ _ty_param_def: Option<ty::TypeParameterDef<'tcx>>,
+ _substs: Option<&mut Substs<'tcx>>,
+ _space: Option<ParamSpace>,
+ span: Span) -> Ty<'tcx> {
span_err!(self.tcx().sess, span, E0121,
"the type placeholder `_` is not allowed within types on item signatures");
self.tcx().types.err
}
+ fn projected_ty_from_poly_trait_ref(&self,
+ span: Span,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ item_name: ast::Name)
+ -> Ty<'tcx>
+ {
+ if let Some(trait_ref) = self.tcx().no_late_bound_regions(&poly_trait_ref) {
+ self.projected_ty(span, trait_ref, item_name)
+ } else {
+            // the trait ref has late-bound regions, so we can't project through the binder
+ span_err!(self.tcx().sess, span, E0212,
+ "cannot extract an associated type from a higher-ranked trait bound \
+ in this context");
+ self.tcx().types.err
+ }
+ }
+
fn projected_ty(&self,
_span: Span,
trait_ref: ty::TraitRef<'tcx>,
/// an `ItemCtxt`. This allows us to use multiple kinds of sources.
trait GetTypeParameterBounds<'tcx> {
fn get_type_parameter_bounds(&self,
- astconv: &AstConv<'tcx>,
+ astconv: &AstConv<'tcx, 'tcx>,
span: Span,
node_id: ast::NodeId)
-> Vec<ty::Predicate<'tcx>>;
where A : GetTypeParameterBounds<'tcx>, B : GetTypeParameterBounds<'tcx>
{
fn get_type_parameter_bounds(&self,
- astconv: &AstConv<'tcx>,
+ astconv: &AstConv<'tcx, 'tcx>,
span: Span,
node_id: ast::NodeId)
-> Vec<ty::Predicate<'tcx>>
/// Empty set of bounds.
impl<'tcx> GetTypeParameterBounds<'tcx> for () {
fn get_type_parameter_bounds(&self,
- _astconv: &AstConv<'tcx>,
+ _astconv: &AstConv<'tcx, 'tcx>,
_span: Span,
_node_id: ast::NodeId)
-> Vec<ty::Predicate<'tcx>>
/// from the trait/impl have been fully converted.
impl<'tcx> GetTypeParameterBounds<'tcx> for ty::GenericPredicates<'tcx> {
fn get_type_parameter_bounds(&self,
- astconv: &AstConv<'tcx>,
+ astconv: &AstConv<'tcx, 'tcx>,
_span: Span,
node_id: ast::NodeId)
-> Vec<ty::Predicate<'tcx>>
/// bounds for a type parameter `X` if `X::Foo` is used.
impl<'tcx> GetTypeParameterBounds<'tcx> for hir::Generics {
fn get_type_parameter_bounds(&self,
- astconv: &AstConv<'tcx>,
+ astconv: &AstConv<'tcx, 'tcx>,
_: Span,
node_id: ast::NodeId)
-> Vec<ty::Predicate<'tcx>>
/// parameter with id `param_id`. We use this so as to avoid running
/// `ast_ty_to_ty`, because we want to avoid triggering an all-out
/// conversion of the type to avoid inducing unnecessary cycles.
-fn is_param<'tcx>(tcx: &TyCtxt<'tcx>,
- ast_ty: &hir::Ty,
- param_id: ast::NodeId)
- -> bool
+fn is_param<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ast_ty: &hir::Ty,
+ param_id: ast::NodeId)
+ -> bool
{
if let hir::TyPath(None, _) = ast_ty.node {
let path_res = *tcx.def_map.borrow().get(&ast_ty.id).unwrap();
ty_generic_predicates_for_fn(ccx, &sig.generics, rcvr_ty_predicates);
let (fty, explicit_self_category) =
- astconv::ty_of_method(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)),
+ AstConv::ty_of_method(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)),
sig, untransformed_rcvr_ty);
let def_id = ccx.tcx.map.local_def_id(id);
- let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
+ let substs = mk_item_substs(ccx, &ty_generics);
let ty_method = ty::Method::new(name,
ty_generics,
def_id,
container);
- let fty = ccx.tcx.mk_fn_def(def_id, substs, ty_method.fty.clone());
+ let fty = ccx.tcx.mk_fn_def(def_id, substs, ty_method.fty);
debug!("method {} (id {}) has type {:?}",
name, id, fty);
ccx.tcx.register_item_type(def_id, TypeScheme {
});
ccx.tcx.predicates.borrow_mut().insert(def_id, ty_method.predicates.clone());
- write_ty_to_tcx(ccx.tcx, id, fty);
+ write_ty_to_tcx(ccx, id, fty);
debug!("writing method type: def_id={:?} mty={:?}",
def_id, ty_method);
{
let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &field.ty);
ty_f.fulfill_ty(tt);
- write_ty_to_tcx(ccx.tcx, field.id, tt);
+ write_ty_to_tcx(ccx, field.id, tt);
/* add the field to the tcache */
ccx.tcx.register_item_type(ccx.tcx.map.local_def_id(field.id),
ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(id),
ty::GenericPredicates::empty());
- write_ty_to_tcx(ccx.tcx, id, ty);
+ write_ty_to_tcx(ccx, id, ty);
let associated_const = Rc::new(ty::AssociatedConst {
name: name,
}
hir::ItemEnum(ref enum_definition, _) => {
let (scheme, predicates) = convert_typed_item(ccx, it);
- write_ty_to_tcx(tcx, it.id, scheme.ty);
+ write_ty_to_tcx(ccx, it.id, scheme.ty);
convert_enum_variant_types(ccx,
tcx.lookup_adt_def_master(ccx.tcx.map.local_def_id(it.id)),
scheme,
},
hir::ItemDefaultImpl(_, ref ast_trait_ref) => {
let trait_ref =
- astconv::instantiate_mono_trait_ref(&ccx.icx(&()),
+ AstConv::instantiate_mono_trait_ref(&ccx.icx(&()),
&ExplicitRscope,
ast_trait_ref,
None);
debug!("convert: impl_bounds={:?}", ty_predicates);
let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &selfty);
- write_ty_to_tcx(tcx, it.id, selfty);
+ write_ty_to_tcx(ccx, it.id, selfty);
tcx.register_item_type(def_id,
TypeScheme { generics: ty_generics.clone(),
ty: selfty });
let trait_ref = opt_trait_ref.as_ref().map(|ast_trait_ref| {
- astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
+ AstConv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates),
&ExplicitRscope,
ast_trait_ref,
Some(selfty))
});
tcx.impl_trait_refs.borrow_mut().insert(def_id, trait_ref);
- enforce_impl_params_are_constrained(tcx, generics, &mut ty_predicates, def_id);
+ enforce_impl_params_are_constrained(ccx, generics, &mut ty_predicates, def_id);
tcx.predicates.borrow_mut().insert(def_id, ty_predicates.clone());
}
}
- enforce_impl_lifetimes_are_constrained(tcx, generics, def_id, impl_items);
+ enforce_impl_lifetimes_are_constrained(ccx, generics, def_id, impl_items);
},
hir::ItemTrait(_, _, _, ref trait_items) => {
let trait_def = trait_def_of_item(ccx, it);
},
hir::ItemStruct(ref struct_def, _) => {
let (scheme, predicates) = convert_typed_item(ccx, it);
- write_ty_to_tcx(tcx, it.id, scheme.ty);
+ write_ty_to_tcx(ccx, it.id, scheme.ty);
let it_def_id = ccx.tcx.map.local_def_id(it.id);
let variant = tcx.lookup_adt_def_master(it_def_id).struct_variant();
hir::ItemTy(_, ref generics) => {
ensure_no_ty_param_bounds(ccx, it.span, generics, "type");
let (scheme, _) = convert_typed_item(ccx, it);
- write_ty_to_tcx(tcx, it.id, scheme.ty);
+ write_ty_to_tcx(ccx, it.id, scheme.ty);
},
_ => {
// This call populates the type cache with the converted type
// of the item in passing. All we have to do here is to write
// it into the node type table.
let (scheme, _) = convert_typed_item(ccx, it);
- write_ty_to_tcx(tcx, it.id, scheme.ty);
+ write_ty_to_tcx(ccx, it.id, scheme.ty);
},
}
}
.map(|field| field.unsubst_ty())
.collect();
let def_id = tcx.map.local_def_id(ctor_id);
- let substs = tcx.mk_substs(mk_item_substs(ccx, &scheme.generics));
- tcx.mk_fn_def(def_id, substs, ty::BareFnTy {
+ let substs = mk_item_substs(ccx, &scheme.generics);
+ tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: abi::Abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(scheme.ty),
variadic: false
})
- })
+ }))
}
};
- write_ty_to_tcx(tcx, ctor_id, ctor_ty);
+ write_ty_to_tcx(ccx, ctor_id, ctor_ty);
tcx.predicates.borrow_mut().insert(tcx.map.local_def_id(ctor_id), predicates);
tcx.register_item_type(tcx.map.local_def_id(ctor_id),
TypeScheme {
}
}
-fn convert_struct_variant<'tcx>(tcx: &TyCtxt<'tcx>,
- did: DefId,
- name: ast::Name,
- disr_val: ty::Disr,
- def: &hir::VariantData) -> ty::VariantDefData<'tcx, 'tcx> {
+fn convert_struct_variant<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ did: DefId,
+ name: ast::Name,
+ disr_val: ty::Disr,
+ def: &hir::VariantData)
+ -> ty::VariantDefData<'tcx, 'tcx> {
let mut seen_fields: FnvHashMap<ast::Name, Span> = FnvHashMap();
- let node_id = tcx.map.as_local_node_id(did).unwrap();
+ let node_id = ccx.tcx.map.as_local_node_id(did).unwrap();
let fields = def.fields().iter().map(|f| {
- let fid = tcx.map.local_def_id(f.id);
+ let fid = ccx.tcx.map.local_def_id(f.id);
let dup_span = seen_fields.get(&f.name).cloned();
if let Some(prev_span) = dup_span {
- let mut err = struct_span_err!(tcx.sess, f.span, E0124,
+ let mut err = struct_span_err!(ccx.tcx.sess, f.span, E0124,
"field `{}` is already declared",
f.name);
span_note!(&mut err, prev_span, "previously declared here");
seen_fields.insert(f.name, f.span);
}
- ty::FieldDefData::new(fid, f.name, ty::Visibility::from_hir(&f.vis, node_id, tcx))
+ ty::FieldDefData::new(fid, f.name,
+ ty::Visibility::from_hir(&f.vis, node_id, ccx.tcx))
}).collect();
ty::VariantDefData {
did: did,
}
}
-fn convert_struct_def<'tcx>(tcx: &TyCtxt<'tcx>,
- it: &hir::Item,
- def: &hir::VariantData)
- -> ty::AdtDefMaster<'tcx>
+fn convert_struct_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ it: &hir::Item,
+ def: &hir::VariantData)
+ -> ty::AdtDefMaster<'tcx>
{
- let did = tcx.map.local_def_id(it.id);
+ let did = ccx.tcx.map.local_def_id(it.id);
let ctor_id = if !def.is_struct() {
- tcx.map.local_def_id(def.id())
+ ccx.tcx.map.local_def_id(def.id())
} else {
did
};
- tcx.intern_adt_def(
- did,
- ty::AdtKind::Struct,
- vec![convert_struct_variant(tcx, ctor_id, it.name, ConstInt::Infer(0), def)]
- )
+ ccx.tcx.intern_adt_def(did, ty::AdtKind::Struct,
+ vec![convert_struct_variant(ccx, ctor_id, it.name, ConstInt::Infer(0), def)])
}
-fn convert_enum_def<'tcx>(tcx: &TyCtxt<'tcx>,
- it: &hir::Item,
- def: &hir::EnumDef)
- -> ty::AdtDefMaster<'tcx>
-{
- fn print_err(tcx: &TyCtxt, span: Span, ty: ty::Ty, cv: ConstVal) {
- struct_span_err!(tcx.sess, span, E0079, "mismatched types")
- .note_expected_found(&"type", &ty, &format!("{}", cv.description()))
- .emit();
- }
- fn evaluate_disr_expr<'tcx>(tcx: &TyCtxt<'tcx>,
- repr_ty: attr::IntType,
- e: &hir::Expr) -> Option<ty::Disr> {
+ fn evaluate_disr_expr(ccx: &CrateCtxt, repr_ty: attr::IntType, e: &hir::Expr)
+ -> Option<ty::Disr> {
debug!("disr expr, checking {}", pprust::expr_to_string(e));
- let ty_hint = repr_ty.to_ty(tcx);
+ let ty_hint = repr_ty.to_ty(ccx.tcx);
+ let print_err = |cv: ConstVal| {
+ struct_span_err!(ccx.tcx.sess, e.span, E0079, "mismatched types")
+ .note_expected_found(&"type", &ty_hint, &format!("{}", cv.description()))
+ .emit();
+ };
+
let hint = UncheckedExprHint(ty_hint);
- match eval_const_expr_partial(tcx, e, hint, None) {
+ match eval_const_expr_partial(ccx.tcx, e, hint, None) {
Ok(ConstVal::Integral(i)) => {
// FIXME: eval_const_expr_partial should return an error if the hint is wrong
match (repr_ty, i) {
- (attr::SignedInt(ast::IntTy::I8), ConstInt::I8(_)) => Some(i),
- (attr::SignedInt(ast::IntTy::I16), ConstInt::I16(_)) => Some(i),
- (attr::SignedInt(ast::IntTy::I32), ConstInt::I32(_)) => Some(i),
- (attr::SignedInt(ast::IntTy::I64), ConstInt::I64(_)) => Some(i),
- (attr::SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) => Some(i),
- (attr::UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) => Some(i),
- (attr::UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) => Some(i),
- (attr::UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) => Some(i),
- (attr::UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) => Some(i),
+ (attr::SignedInt(ast::IntTy::I8), ConstInt::I8(_)) |
+ (attr::SignedInt(ast::IntTy::I16), ConstInt::I16(_)) |
+ (attr::SignedInt(ast::IntTy::I32), ConstInt::I32(_)) |
+ (attr::SignedInt(ast::IntTy::I64), ConstInt::I64(_)) |
+ (attr::SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) |
+ (attr::UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) |
+ (attr::UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) |
+ (attr::UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) |
+ (attr::UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) |
(attr::UnsignedInt(ast::UintTy::Us), ConstInt::Usize(_)) => Some(i),
(_, i) => {
- print_err(tcx, e.span, ty_hint, ConstVal::Integral(i));
+ print_err(ConstVal::Integral(i));
None
},
}
},
Ok(cv) => {
- print_err(tcx, e.span, ty_hint, cv);
+ print_err(cv);
None
},
// enum variant evaluation happens before the global constant check
// so we need to report the real error
Err(ConstEvalErr { kind: ErroneousReferencedConstant(box err), ..}) |
Err(err) => {
- let mut diag = struct_span_err!(tcx.sess, err.span, E0080,
+ let mut diag = struct_span_err!(ccx.tcx.sess, err.span, E0080,
"constant evaluation error: {}",
err.description());
if !e.span.contains(err.span) {
}
}
- fn report_discrim_overflow(tcx: &TyCtxt,
- variant_span: Span,
- variant_name: &str,
- prev_val: ty::Disr) {
- span_err!(tcx.sess, variant_span, E0370,
- "enum discriminant overflowed on value after {}; \
- set explicitly via {} = {} if that is desired outcome",
- prev_val, variant_name, prev_val.wrap_incr());
- }
-
- fn next_disr(tcx: &TyCtxt,
- v: &hir::Variant,
- repr_type: attr::IntType,
- prev_disr_val: Option<ty::Disr>) -> Option<ty::Disr> {
- if let Some(prev_disr_val) = prev_disr_val {
- let result = repr_type.disr_incr(prev_disr_val);
- if let None = result {
- report_discrim_overflow(tcx, v.span, &v.node.name.as_str(), prev_disr_val);
- }
- result
- } else {
- Some(repr_type.initial_discriminant(tcx))
- }
- }
- fn convert_enum_variant<'tcx>(tcx: &TyCtxt<'tcx>,
- v: &hir::Variant,
- disr: ty::Disr)
- -> ty::VariantDefData<'tcx, 'tcx>
- {
- let did = tcx.map.local_def_id(v.node.data.id());
- let name = v.node.name;
- convert_struct_variant(tcx, did, name, disr, &v.node.data)
- }
+fn convert_enum_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ it: &hir::Item,
+ def: &hir::EnumDef)
+ -> ty::AdtDefMaster<'tcx>
+{
+ let tcx = ccx.tcx;
let did = tcx.map.local_def_id(it.id);
let repr_hints = tcx.lookup_repr_hints(did);
let repr_type = tcx.enum_repr_type(repr_hints.get(0));
- let mut prev_disr = None;
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_disr = None::<ty::Disr>;
let variants = def.variants.iter().map(|v| {
- let disr = match v.node.disr_expr {
- Some(ref e) => evaluate_disr_expr(tcx, repr_type, e),
- None => next_disr(tcx, v, repr_type, prev_disr)
- }.unwrap_or_else(|| {
- prev_disr.map(ty::Disr::wrap_incr)
- .unwrap_or(repr_type.initial_discriminant(tcx))
- });
-
+ let wrapped_disr = prev_disr.map_or(initial, |d| d.wrap_incr());
+ let disr = if let Some(ref e) = v.node.disr_expr {
+ evaluate_disr_expr(ccx, repr_type, e)
+ } else if let Some(disr) = repr_type.disr_incr(tcx, prev_disr) {
+ Some(disr)
+ } else {
+ span_err!(tcx.sess, v.span, E0370,
+ "enum discriminant overflowed on value after {}; \
+ set explicitly via {} = {} if that is desired outcome",
+ prev_disr.unwrap(), v.node.name, wrapped_disr);
+ None
+ }.unwrap_or(wrapped_disr);
prev_disr = Some(disr);
- convert_enum_variant(tcx, v, disr)
+
+ let did = tcx.map.local_def_id(v.node.data.id());
+ convert_struct_variant(ccx, did, v.node.name, disr, &v.node.data)
}).collect();
tcx.intern_adt_def(tcx.map.local_def_id(it.id), ty::AdtKind::Enum, variants)
}
}
hir::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => {
let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty());
- let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl);
+ let tofd = AstConv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl);
let def_id = ccx.tcx.map.local_def_id(it.id);
- let substs = tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
+ let substs = mk_item_substs(ccx, &ty_generics);
let ty = tcx.mk_fn_def(def_id, substs, tofd);
ty::TypeScheme { ty: ty, generics: ty_generics }
}
ty::TypeScheme { ty: ty, generics: ty_generics }
}
hir::ItemEnum(ref ei, ref generics) => {
+ let def = convert_enum_def(ccx, it, ei);
let ty_generics = ty_generics_for_type(ccx, generics);
let substs = mk_item_substs(ccx, &ty_generics);
- let def = convert_enum_def(tcx, it, ei);
- let t = tcx.mk_enum(def, tcx.mk_substs(substs));
+ let t = tcx.mk_enum(def, substs);
ty::TypeScheme { ty: t, generics: ty_generics }
}
hir::ItemStruct(ref si, ref generics) => {
+ let def = convert_struct_def(ccx, it, si);
let ty_generics = ty_generics_for_type(ccx, generics);
let substs = mk_item_substs(ccx, &ty_generics);
- let def = convert_struct_def(tcx, it, si);
- let t = tcx.mk_struct(def, tcx.mk_substs(substs));
+ let t = tcx.mk_struct(def, substs);
ty::TypeScheme { ty: t, generics: ty_generics }
}
hir::ItemDefaultImpl(..) |
hir::ForeignItemStatic(ref t, _) => {
ty::TypeScheme {
generics: ty::Generics::empty(),
- ty: ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, t)
+ ty: AstConv::ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, t)
}
}
}
let abi = tcx.map.get_foreign_abi(it.id);
let scheme = type_scheme_of_foreign_item(ccx, it, abi);
- write_ty_to_tcx(ccx.tcx, it.id, scheme.ty);
+ write_ty_to_tcx(ccx, it.id, scheme.ty);
let predicates = match it.node {
hir::ForeignItemFn(_, ref generics) => {
}
// Add the Sized bound, unless the type parameter is marked as `?Sized`.
-fn add_unsized_bound<'tcx>(astconv: &AstConv<'tcx>,
+fn add_unsized_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>,
bounds: &mut ty::BuiltinBounds,
ast_bounds: &[hir::TyParamBound],
span: Span)
for predicate in &where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(ref bound_pred) => {
- let ty = ast_ty_to_ty(&ccx.icx(&(base_predicates, ast_generics)),
- &ExplicitRscope,
- &bound_pred.bounded_ty);
+ let ty = AstConv::ast_ty_to_ty(&ccx.icx(&(base_predicates, ast_generics)),
+ &ExplicitRscope,
+ &bound_pred.bounded_ty);
for bound in bound_pred.bounds.iter() {
match bound {
index: u32)
-> Ty<'tcx>
{
- let ty = ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &path);
+ let ty = AstConv::ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &path);
for leaf_ty in ty.walk() {
if let ty::TyParam(p) = leaf_ty.sty {
hir::TraitTyParamBound(..) =>
None,
hir::RegionTyParamBound(ref lifetime) =>
- Some(astconv::ast_region_to_region(ccx.tcx, lifetime)),
+ Some(ast_region_to_region(ccx.tcx, lifetime)),
}
})
.collect()
/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or
/// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the
/// built-in trait (formerly known as kind): Send.
-fn compute_bounds<'tcx>(astconv: &AstConv<'tcx>,
+fn compute_bounds<'tcx>(astconv: &AstConv<'tcx, 'tcx>,
param_ty: ty::Ty<'tcx>,
ast_bounds: &[hir::TyParamBound],
sized_by_default: SizedByDefault,
span: Span)
- -> astconv::Bounds<'tcx>
+ -> Bounds<'tcx>
{
let mut bounds =
conv_param_bounds(astconv,
/// because this can be anywhere from 0 predicates (`T:?Sized` adds no
/// predicates) to 1 (`T:Foo`) to many (`T:Bar<X=i32>` adds `T:Bar`
/// and `<T as Bar>::X == i32`).
-fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx>,
+fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>,
param_ty: Ty<'tcx>,
bound: &hir::TyParamBound)
-> Vec<ty::Predicate<'tcx>>
}
}
-fn conv_poly_trait_ref<'tcx>(astconv: &AstConv<'tcx>,
+fn conv_poly_trait_ref<'tcx>(astconv: &AstConv<'tcx, 'tcx>,
param_ty: Ty<'tcx>,
trait_ref: &hir::PolyTraitRef,
projections: &mut Vec<ty::PolyProjectionPredicate<'tcx>>)
-> ty::PolyTraitRef<'tcx>
{
- astconv::instantiate_poly_trait_ref(astconv,
+ AstConv::instantiate_poly_trait_ref(astconv,
&ExplicitRscope,
trait_ref,
Some(param_ty),
projections)
}
-fn conv_param_bounds<'a,'tcx>(astconv: &AstConv<'tcx>,
+fn conv_param_bounds<'a,'tcx>(astconv: &AstConv<'tcx, 'tcx>,
span: Span,
param_ty: ty::Ty<'tcx>,
ast_bounds: &[hir::TyParamBound])
- -> astconv::Bounds<'tcx>
+ -> Bounds<'tcx>
{
let tcx = astconv.tcx();
- let astconv::PartitionedBounds {
+ let PartitionedBounds {
builtin_bounds,
trait_bounds,
region_bounds
- } = astconv::partition_bounds(tcx, span, &ast_bounds);
+ } = partition_bounds(tcx, span, &ast_bounds);
let mut projection_bounds = Vec::new();
.map(|r| ast_region_to_region(tcx, r))
.collect();
- astconv::Bounds {
+ Bounds {
region_bounds: region_bounds,
builtin_bounds: builtin_bounds,
trait_bounds: trait_bounds,
let rb = BindingRscope::new();
let input_tys = decl.inputs
.iter()
- .map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None))
+ .map(|a| AstConv::ty_of_arg(&ccx.icx(ast_generics), &rb, a, None))
.collect::<Vec<_>>();
let output = match decl.output {
hir::Return(ref ty) =>
- ty::FnConverging(ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &ty)),
+ ty::FnConverging(AstConv::ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &ty)),
hir::DefaultReturn(..) =>
ty::FnConverging(ccx.tcx.mk_nil()),
hir::NoReturn(..) =>
}
}
- let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
- let t_fn = ccx.tcx.mk_fn_def(id, substs, ty::BareFnTy {
+ let substs = mk_item_substs(ccx, &ty_generics);
+ let t_fn = ccx.tcx.mk_fn_def(id, substs, ccx.tcx.mk_bare_fn(ty::BareFnTy {
abi: abi,
unsafety: hir::Unsafety::Unsafe,
sig: ty::Binder(ty::FnSig {inputs: input_tys,
output: output,
variadic: decl.variadic}),
- });
+ }));
ty::TypeScheme {
generics: ty_generics,
fn mk_item_substs<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
ty_generics: &ty::Generics<'tcx>)
- -> Substs<'tcx>
+ -> &'tcx Substs<'tcx>
{
let types =
ty_generics.types.map(
ty_generics.regions.map(
|def| def.to_early_bound_region());
- Substs::new(types, regions)
+ ccx.tcx.mk_substs(Substs::new(types, regions))
}
/// Checks that all the type parameters on an impl
-fn enforce_impl_params_are_constrained<'tcx>(tcx: &TyCtxt<'tcx>,
- ast_generics: &hir::Generics,
- impl_predicates: &mut ty::GenericPredicates<'tcx>,
- impl_def_id: DefId)
+fn enforce_impl_params_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ ast_generics: &hir::Generics,
+ impl_predicates: &mut ty::GenericPredicates<'tcx>,
+ impl_def_id: DefId)
{
- let impl_scheme = tcx.lookup_item_type(impl_def_id);
- let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
+ let impl_scheme = ccx.tcx.lookup_item_type(impl_def_id);
+ let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id);
assert!(impl_predicates.predicates.is_empty_in(FnSpace));
assert!(impl_predicates.predicates.is_empty_in(SelfSpace));
input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false));
}
- ctp::setup_constraining_predicates(tcx,
- impl_predicates.predicates.get_mut_slice(TypeSpace),
+ ctp::setup_constraining_predicates(impl_predicates.predicates.get_mut_slice(TypeSpace),
impl_trait_ref,
&mut input_parameters);
idx: index as u32,
name: ty_param.name };
if !input_parameters.contains(&ctp::Parameter::Type(param_ty)) {
- report_unused_parameter(tcx, ty_param.span, "type", ¶m_ty.to_string());
+ report_unused_parameter(ccx, ty_param.span, "type", ¶m_ty.to_string());
}
}
}
-fn enforce_impl_lifetimes_are_constrained<'tcx>(tcx: &TyCtxt<'tcx>,
- ast_generics: &hir::Generics,
- impl_def_id: DefId,
- impl_items: &[hir::ImplItem])
+fn enforce_impl_lifetimes_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
+ ast_generics: &hir::Generics,
+ impl_def_id: DefId,
+ impl_items: &[hir::ImplItem])
{
// Every lifetime used in an associated type must be constrained.
- let impl_scheme = tcx.lookup_item_type(impl_def_id);
- let impl_predicates = tcx.lookup_predicates(impl_def_id);
- let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
+ let impl_scheme = ccx.tcx.lookup_item_type(impl_def_id);
+ let impl_predicates = ccx.tcx.lookup_predicates(impl_def_id);
+ let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id);
let mut input_parameters: HashSet<_> =
ctp::parameters_for_type(impl_scheme.ty, false).into_iter().collect();
if let Some(ref trait_ref) = impl_trait_ref {
input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false));
}
- ctp::identify_constrained_type_params(tcx,
+ ctp::identify_constrained_type_params(
&impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters);
- let lifetimes_in_associated_types: HashSet<_> =
- impl_items.iter()
- .map(|item| tcx.impl_or_trait_item(tcx.map.local_def_id(item.id)))
- .filter_map(|item| match item {
- ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty,
- ty::ConstTraitItem(..) | ty::MethodTraitItem(..) => None
- })
- .flat_map(|ty| ctp::parameters_for_type(ty, true))
- .filter_map(|p| match p {
- ctp::Parameter::Type(_) => None,
- ctp::Parameter::Region(r) => Some(r),
- })
- .collect();
+ let lifetimes_in_associated_types: HashSet<_> = impl_items.iter()
+ .map(|item| ccx.tcx.impl_or_trait_item(ccx.tcx.map.local_def_id(item.id)))
+ .filter_map(|item| match item {
+ ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty,
+ ty::ConstTraitItem(..) | ty::MethodTraitItem(..) => None
+ })
+ .flat_map(|ty| ctp::parameters_for_type(ty, true))
+ .filter_map(|p| match p {
+ ctp::Parameter::Type(_) => None,
+ ctp::Parameter::Region(r) => Some(r),
+ })
+ .collect();
for (index, lifetime_def) in ast_generics.lifetimes.iter().enumerate() {
let region = ty::EarlyBoundRegion { space: TypeSpace,
lifetimes_in_associated_types.contains(®ion) && // (*)
!input_parameters.contains(&ctp::Parameter::Region(region))
{
- report_unused_parameter(tcx, lifetime_def.lifetime.span,
+ report_unused_parameter(ccx, lifetime_def.lifetime.span,
"lifetime", ®ion.name.to_string());
}
}
// used elsewhere are not projected back out.
}
-fn report_unused_parameter(tcx: &TyCtxt,
+fn report_unused_parameter(ccx: &CrateCtxt,
span: Span,
kind: &str,
name: &str)
{
- span_err!(tcx.sess, span, E0207,
+ span_err!(ccx.tcx.sess, span, E0207,
"the {} parameter `{}` is not constrained by the \
impl trait, self type, or predicates",
kind, name);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rustc::ty::subst;
-use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::{self, subst, Ty};
use std::collections::HashSet;
}
}
-pub fn identify_constrained_type_params<'tcx>(_tcx: &TyCtxt<'tcx>,
- predicates: &[ty::Predicate<'tcx>],
+pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
input_parameters: &mut HashSet<Parameter>)
{
let mut predicates = predicates.to_owned();
- setup_constraining_predicates(_tcx, &mut predicates, impl_trait_ref, input_parameters);
+ setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters);
}
/// which is determined by 1, which requires `U`, that is determined
/// by 0. I should probably pick a less tangled example, but I can't
/// think of any.
-pub fn setup_constraining_predicates<'tcx>(_tcx: &TyCtxt<'tcx>,
- predicates: &mut [ty::Predicate<'tcx>],
+pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
input_parameters: &mut HashSet<Parameter>)
{
use dep_graph::DepNode;
use hir::map as hir_map;
use hir::def::Def;
-use rustc::infer::{self, TypeOrigin};
+use rustc::infer::TypeOrigin;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::traits::ProjectionMode;
pub struct CrateCtxt<'a, 'tcx: 'a> {
// A mapping from method call sites to traits that have that method.
pub trait_map: hir::TraitMap,
+
/// A vector of every trait accessible in the whole crate
/// (i.e. including those from subcrates). This is used only for
/// error reporting, and so is lazily initialised and generally
/// shouldn't taint the common path (hence the RefCell).
pub all_traits: RefCell<Option<check::method::AllTraitsVec>>,
- pub tcx: &'a TyCtxt<'tcx>,
+
+ /// This stack is used to identify cycles in the user's source.
+ /// Note that these cycles can cross multiple items.
+ pub stack: RefCell<Vec<collect::AstConvRequest>>,
+
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
// Functions that write types into the node type table
-fn write_ty_to_tcx<'tcx>(tcx: &TyCtxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) {
+fn write_ty_to_tcx<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) {
debug!("write_ty_to_tcx({}, {:?})", node_id, ty);
assert!(!ty.needs_infer());
- tcx.node_type_insert(node_id, ty);
+ ccx.tcx.node_type_insert(node_id, ty);
}
-fn write_substs_to_tcx<'tcx>(tcx: &TyCtxt<'tcx>,
+fn write_substs_to_tcx<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
node_id: ast::NodeId,
item_substs: ty::ItemSubsts<'tcx>) {
if !item_substs.is_noop() {
assert!(!item_substs.substs.types.needs_infer());
- tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs);
+ ccx.tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs);
}
}
-fn lookup_full_def(tcx: &TyCtxt, sp: Span, id: ast::NodeId) -> Def {
+fn lookup_full_def(tcx: TyCtxt, sp: Span, id: ast::NodeId) -> Def {
match tcx.def_map.borrow().get(&id) {
Some(x) => x.full_def(),
None => {
}
}
-fn require_c_abi_if_variadic(tcx: &TyCtxt,
+fn require_c_abi_if_variadic(tcx: TyCtxt,
decl: &hir::FnDecl,
abi: Abi,
span: Span) {
}
}
-fn require_same_types<'a, 'tcx>(tcx: &TyCtxt<'tcx>,
- maybe_infcx: Option<&infer::InferCtxt<'a, 'tcx>>,
- t1_is_expected: bool,
+pub fn emit_type_err<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ span: Span,
+ found_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ terr: &ty::error::TypeError<'tcx>,
+ msg: &str) {
+ let mut err = struct_span_err!(tcx.sess, span, E0211, "{}", msg);
+ err = err.span_label(span, &terr);
+ err = err.note_expected_found(&"type", &expected_ty, &found_ty);
+ tcx.note_and_explain_type_err(&mut err, terr, span);
+ err.emit();
+}
+
+fn require_same_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
span: Span,
t1: Ty<'tcx>,
t2: Ty<'tcx>,
msg: &str)
- -> bool
-{
- let result = match maybe_infcx {
- None => {
- let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, ProjectionMode::AnyFinal);
- infer::mk_eqty(&infcx, t1_is_expected, TypeOrigin::Misc(span), t1, t2)
- }
- Some(infcx) => {
- infer::mk_eqty(infcx, t1_is_expected, TypeOrigin::Misc(span), t1, t2)
- }
- };
-
- match result {
- Ok(_) => true,
- Err(ref terr) => {
- let mut err = struct_span_err!(tcx.sess, span, E0211, "{}", msg);
- err = err.span_label(span, &terr);
- let (mut expected_ty, mut found_ty) =
- if t1_is_expected {(t1, t2)} else {(t2, t1)};
- if let Some(infcx) = maybe_infcx {
- expected_ty = infcx.resolve_type_vars_if_possible(&expected_ty);
- found_ty = infcx.resolve_type_vars_if_possible(&found_ty);
- }
- err = err.note_expected_found(&"type",
- &expected_ty,
- &found_ty);
- tcx.note_and_explain_type_err(&mut err, terr, span);
- err.emit();
+ -> bool {
+ ccx.tcx.infer_ctxt(None, None, ProjectionMode::AnyFinal).enter(|infcx| {
+ if let Err(err) = infcx.eq_types(false, TypeOrigin::Misc(span), t1, t2) {
+ emit_type_err(infcx.tcx, span, t1, t2, &err, msg);
false
+ } else {
+ true
}
- }
+ })
}
fn check_main_fn_ty(ccx: &CrateCtxt,
}
let main_def_id = tcx.map.local_def_id(main_id);
let substs = tcx.mk_substs(Substs::empty());
- let se_ty = tcx.mk_fn_def(main_def_id, substs, ty::BareFnTy {
+ let se_ty = tcx.mk_fn_def(main_def_id, substs,
+ tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(tcx.mk_nil()),
variadic: false
})
- });
+ }));
- require_same_types(tcx, None, false, main_span, main_t, se_ty,
+ require_same_types(ccx, main_span, main_t, se_ty,
"main function has wrong type");
}
_ => {
let start_def_id = ccx.tcx.map.local_def_id(start_id);
let substs = tcx.mk_substs(Substs::empty());
- let se_ty = tcx.mk_fn_def(start_def_id, substs, ty::BareFnTy {
+ let se_ty = tcx.mk_fn_def(start_def_id, substs,
+ tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(tcx.types.isize),
variadic: false,
}),
- });
+ }));
- require_same_types(tcx, None, false, start_span, start_t, se_ty,
+ require_same_types(ccx, start_span, start_t, se_ty,
"start function has wrong type");
}
_ => {
}
}
-pub fn check_crate(tcx: &TyCtxt, trait_map: hir::TraitMap) -> CompileResult {
+pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trait_map: hir::TraitMap)
+ -> CompileResult {
let time_passes = tcx.sess.time_passes();
let ccx = CrateCtxt {
trait_map: trait_map,
all_traits: RefCell::new(None),
+ stack: RefCell::new(Vec::new()),
tcx: tcx
};
// have valid types and not error
tcx.sess.track_errors(|| {
time(time_passes, "type collecting", ||
- collect::collect_item_types(tcx));
+ collect::collect_item_types(&ccx));
})?;
}
impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
- fn tcx(&self) -> &'a TyCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.terms_cx.tcx
}
self.add_constraints_from_mt(generics, mt, variance);
}
- ty::TyTuple(ref subtys) => {
+ ty::TyTuple(subtys) => {
for &subty in subtys {
self.add_constraints_from_ty(generics, subty, variance);
}
/// Code for transforming variances.
mod xform;
-pub fn infer_variance(tcx: &TyCtxt) {
+pub fn infer_variance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut arena = arena::TypedArena::new();
let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &mut arena);
let constraints_cx = constraints::add_constraints_from_crate(terms_cx);
// The first pass over the crate simply builds up the set of inferreds.
pub struct TermsContext<'a, 'tcx: 'a> {
- pub tcx: &'a TyCtxt<'tcx>,
+ pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub arena: &'a TypedArena<VarianceTerm<'a>>,
pub empty_variances: Rc<ty::ItemVariances>,
}
pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
- tcx: &'a TyCtxt<'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
arena: &'a mut TypedArena<VarianceTerm<'a>>)
-> TermsContext<'a, 'tcx>
{
terms_cx
}
-fn lang_items(tcx: &TyCtxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
+fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
let all = vec![
(tcx.lang_items.phantom_data(), vec![ty::Covariant]),
(tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
use rustc::hir::print as pprust;
use rustc::ty::{self, TyCtxt};
use rustc::ty::subst;
-use rustc::middle::stability;
use rustc_const_eval::lookup_const_by_id;
})
}
-fn try_inline_def(cx: &DocContext, tcx: &TyCtxt,
- def: Def) -> Option<Vec<clean::Item>> {
+fn try_inline_def<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def: Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(clean::Public),
- stability: stability::lookup_stability(tcx, did).clean(cx),
- deprecation: stability::lookup_deprecation(tcx, did).clean(cx),
+ stability: tcx.lookup_stability(did).clean(cx),
+ deprecation: tcx.lookup_deprecation(did).clean(cx),
def_id: did,
});
Some(ret)
}
-pub fn load_attrs(cx: &DocContext, tcx: &TyCtxt,
- did: DefId) -> Vec<clean::Attribute> {
+pub fn load_attrs<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> Vec<clean::Attribute> {
tcx.get_attrs(did).iter().map(|a| a.clean(cx)).collect()
}
}
}
-pub fn build_external_trait(cx: &DocContext, tcx: &TyCtxt,
- did: DefId) -> clean::Trait {
+pub fn build_external_trait<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::Trait {
let def = tcx.lookup_trait_def(did);
let trait_items = tcx.trait_items(did).clean(cx);
let predicates = tcx.lookup_predicates(did);
}
}
-fn build_external_function(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::Function {
+fn build_external_function<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::Function {
let t = tcx.lookup_item_type(did);
let (decl, style, abi) = match t.ty.sty {
ty::TyFnDef(_, _, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi),
}
}
-fn build_struct(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::Struct {
+fn build_struct<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::Struct {
let t = tcx.lookup_item_type(did);
let predicates = tcx.lookup_predicates(did);
let variant = tcx.lookup_adt_def(did).struct_variant();
}
}
-fn build_type(cx: &DocContext, tcx: &TyCtxt, did: DefId) -> clean::ItemEnum {
+fn build_type<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::ItemEnum {
let t = tcx.lookup_item_type(did);
let predicates = tcx.lookup_predicates(did);
match t.ty.sty {
}, false)
}
-pub fn build_impls(cx: &DocContext,
- tcx: &TyCtxt,
- did: DefId) -> Vec<clean::Item> {
+pub fn build_impls<'a, 'tcx>(cx: &DocContext,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> Vec<clean::Item> {
tcx.populate_inherent_implementations_for_type_if_necessary(did);
let mut impls = Vec::new();
populate_impls(cx, tcx, item.def, &mut impls);
}
- fn populate_impls(cx: &DocContext, tcx: &TyCtxt,
- def: cstore::DefLike,
- impls: &mut Vec<clean::Item>) {
+ fn populate_impls<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def: cstore::DefLike,
+ impls: &mut Vec<clean::Item>) {
match def {
cstore::DlImpl(did) => build_impl(cx, tcx, did, impls),
cstore::DlDef(Def::Mod(did)) => {
impls
}
-pub fn build_impl(cx: &DocContext,
- tcx: &TyCtxt,
- did: DefId,
- ret: &mut Vec<clean::Item>) {
+pub fn build_impl<'a, 'tcx>(cx: &DocContext,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId,
+ ret: &mut Vec<clean::Item>) {
if !cx.renderinfo.borrow_mut().inlined.insert(did) {
return
}
name: None,
attrs: attrs,
visibility: Some(clean::Inherited),
- stability: stability::lookup_stability(tcx, did).clean(cx),
- deprecation: stability::lookup_deprecation(tcx, did).clean(cx),
+ stability: tcx.lookup_stability(did).clean(cx),
+ deprecation: tcx.lookup_deprecation(did).clean(cx),
def_id: did,
});
}
source: clean::Span::empty(),
attrs: vec![],
visibility: None,
- stability: stability::lookup_stability(tcx, did).clean(cx),
- deprecation: stability::lookup_deprecation(tcx, did).clean(cx),
+ stability: tcx.lookup_stability(did).clean(cx),
+ deprecation: tcx.lookup_deprecation(did).clean(cx),
def_id: did
})
}
source: clean::Span::empty(),
attrs: vec![],
visibility: None,
- stability: stability::lookup_stability(tcx, did).clean(cx),
- deprecation: stability::lookup_deprecation(tcx, did).clean(cx),
+ stability: tcx.lookup_stability(did).clean(cx),
+ deprecation: tcx.lookup_deprecation(did).clean(cx),
def_id: did
})
}
name: None,
attrs: attrs,
visibility: Some(clean::Inherited),
- stability: stability::lookup_stability(tcx, did).clean(cx),
- deprecation: stability::lookup_deprecation(tcx, did).clean(cx),
+ stability: tcx.lookup_stability(did).clean(cx),
+ deprecation: tcx.lookup_deprecation(did).clean(cx),
def_id: did,
});
}
-fn build_module(cx: &DocContext, tcx: &TyCtxt,
- did: DefId) -> clean::Module {
+fn build_module<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
is_crate: false,
};
- fn fill_in(cx: &DocContext, tcx: &TyCtxt, did: DefId,
- items: &mut Vec<clean::Item>) {
+ fn fill_in<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId, items: &mut Vec<clean::Item>) {
// If we're reexporting a reexport it may actually reexport something in
// two namespaces, so the target may be listed twice. Make sure we only
// visit each node at most once.
}
}
-fn build_const(cx: &DocContext, tcx: &TyCtxt,
- did: DefId) -> clean::Constant {
+fn build_const<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId) -> clean::Constant {
let (expr, ty) = lookup_const_by_id(tcx, did, None).unwrap_or_else(|| {
panic!("expected lookup_const_by_id to succeed for {:?}", did);
});
}
}
-fn build_static(cx: &DocContext, tcx: &TyCtxt,
- did: DefId,
- mutable: bool) -> clean::Static {
+fn build_static<'a, 'tcx>(cx: &DocContext, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ did: DefId,
+ mutable: bool) -> clean::Static {
clean::Static {
type_: tcx.lookup_item_type(did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
// extract the stability index for a node from tcx, if possible
fn get_stability(cx: &DocContext, def_id: DefId) -> Option<Stability> {
- cx.tcx_opt().and_then(|tcx| stability::lookup_stability(tcx, def_id)).clean(cx)
+ cx.tcx_opt().and_then(|tcx| tcx.lookup_stability(def_id)).clean(cx)
}
fn get_deprecation(cx: &DocContext, def_id: DefId) -> Option<Deprecation> {
- cx.tcx_opt().and_then(|tcx| stability::lookup_deprecation(tcx, def_id)).clean(cx)
+ cx.tcx_opt().and_then(|tcx| tcx.lookup_deprecation(def_id)).clean(cx)
}
pub trait Clean<T> {
// collect any late bound regions
let mut late_bounds = vec![];
for &ty_s in self.substs.types.get_slice(ParamSpace::TypeSpace) {
- if let ty::TyTuple(ref ts) = ty_s.sty {
+ if let ty::TyTuple(ts) = ty_s.sty {
for &ty_s in ts {
if let ty::TyRef(ref reg, _) = ty_s.sty {
if let &ty::Region::ReLateBound(_, _) = *reg {
_ => None,
}
}
+
+ pub fn is_generic(&self) -> bool {
+ match *self {
+ ResolvedPath { is_generic, .. } => is_generic,
+ _ => false,
+ }
+ }
}
impl GetDefId for Type {
Def::Static(i, _) => (i, TypeStatic),
Def::Variant(i, _) => (i, TypeEnum),
Def::SelfTy(Some(def_id), _) => (def_id, TypeTrait),
- Def::SelfTy(_, Some((impl_id, _))) => return cx.map.local_def_id(impl_id),
+ Def::SelfTy(_, Some(impl_id)) => return cx.map.local_def_id(impl_id),
_ => return def.def_id()
};
if did.is_local() { return did }
inner: AssociatedTypeItem(bounds, self.ty.clean(cx)),
visibility: self.vis.clean(cx),
def_id: self.def_id,
- stability: stability::lookup_stability(cx.tcx(), self.def_id).clean(cx),
- deprecation: stability::lookup_deprecation(cx.tcx(), self.def_id).clean(cx),
+ stability: cx.tcx().lookup_stability(self.def_id).clean(cx),
+ deprecation: cx.tcx().lookup_deprecation(self.def_id).clean(cx),
}
}
}
use rustc::lint;
use rustc_trans::back::link;
use rustc_resolve as resolve;
-use rustc::hir::lowering::{lower_crate, LoweringContext};
use rustc_metadata::cstore::CStore;
use rustc_metadata::creader::LocalCrateReader;
/// Are we generating documentation (`Typed`) or tests (`NotTyped`)?
pub enum MaybeTyped<'a, 'tcx: 'a> {
- Typed(&'a TyCtxt<'tcx>),
+ Typed(TyCtxt<'a, 'tcx, 'tcx>),
NotTyped(&'a session::Session)
}
}
}
- pub fn tcx_opt<'a>(&'a self) -> Option<&'a TyCtxt<'tcx>> {
+ pub fn tcx_opt<'a>(&'a self) -> Option<TyCtxt<'a, 'tcx, 'tcx>> {
match self.maybe_typed {
Typed(tcx) => Some(tcx),
NotTyped(_) => None
}
}
- pub fn tcx<'a>(&'a self) -> &'a TyCtxt<'tcx> {
+ pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
let tcx_opt = self.tcx_opt();
tcx_opt.expect("tcx not present")
}
let defs = &RefCell::new(hir_map::collect_definitions(&krate));
LocalCrateReader::new(&sess, &cstore, &defs, &krate, &name).read_crates(&dep_graph);
- let lcx = LoweringContext::new(&sess, Some(&krate), defs);
- // Lower ast -> hir.
- let mut hir_forest = hir_map::Forest::new(lower_crate(&lcx, &krate), dep_graph);
+ // Lower ast -> hir and resolve.
+ let (analysis, resolutions, mut hir_forest) = {
+ let defs = &mut *defs.borrow_mut();
+ driver::lower_and_resolve(&sess, &name, defs, &krate, dep_graph, resolve::MakeGlobMap::No)
+ };
+
let arenas = ty::CtxtArenas::new();
let hir_map = hir_map::map_crate(&mut hir_forest, defs);
abort_on_err(driver::phase_3_run_analysis_passes(&sess,
hir_map,
+ analysis,
+ resolutions,
&arenas,
&name,
- resolve::MakeGlobMap::No,
|tcx, _, analysis, result| {
// Return if the driver hit an err (in `result`)
if let Err(_) = result {
try_err!(write!(&mut f, "{}", *implementor), &mydst);
}
- try_err!(write!(&mut f, r"implementors['{}'] = [", krate.name), &mydst);
+ try_err!(write!(&mut f, r#"implementors["{}"] = ["#, krate.name), &mydst);
for imp in imps {
// If the trait and implementation are in the same crate, then
// there's no need to emit information about it (there's inlining
containing_ver: Option<&'a str>) -> fmt::Result {
if let Some(v) = ver {
if containing_ver != ver && v.len() > 0 {
- write!(w, "<span class=\"since\">{}</span>",
+ write!(w, "<div class=\"since\">{}</div>",
v)?
}
}
render_header: bool, outer_version: Option<&str>) -> fmt::Result {
if render_header {
write!(w, "<h3 class='impl'><span class='in-band'><code>{}</code>", i.inner_impl())?;
- let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]);
- render_stability_since_raw(w, since, outer_version)?;
write!(w, "</span><span class='out-of-band'>")?;
+ let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]);
if let Some(l) = (Item { item: &i.impl_item, cx: cx }).href() {
+ write!(w, "<div class='ghost'></div>")?;
+ render_stability_since_raw(w, since, outer_version)?;
write!(w, "<a id='src-{}' class='srclink' \
href='{}' title='{}'>[src]</a>",
i.impl_item.def_id.index.as_usize(), l, "goto source code")?;
+ } else {
+ render_stability_since_raw(w, since, outer_version)?;
}
write!(w, "</span>")?;
write!(w, "</h3>\n")?;
h1.fqn {
border-bottom: 1px dashed;
margin-top: 0;
+ position: relative;
}
h2, h3:not(.impl):not(.method):not(.type):not(.tymethod), h4:not(.method):not(.type):not(.tymethod) {
border-bottom: 1px solid;
font-weight: 600;
margin-top: 10px;
margin-bottom: 10px;
+ position: relative;
}
h3.impl, h3.method, h3.type {
margin-top: 15px;
.content .out-of-band {
font-size: 23px;
- width: 40%;
margin: 0px;
padding: 0px;
text-align: right;
display: inline-block;
+ font-weight: normal;
+ position: absolute;
+ right: 0;
+}
+
+h3.impl > .out-of-band {
+ font-size: 21px;
+}
+
+h4 > code, h3 > code {
+ position: inherit;
+}
+
+.in-band, code {
+ z-index: 5;
}
.content .in-band {
- width: 60%;
margin: 0px;
padding: 0px;
display: inline-block;
}
+#main { position: relative; }
+#main > .since {
+ top: inherit;
+ font-family: "Fira Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
+}
+
.content table {
border-spacing: 0 5px;
border-collapse: separate;
opacity: 0.65;
}
-span.since {
- float: right;
+.since {
font-weight: normal;
font-size: initial;
color: grey;
+ position: absolute;
+ right: 0;
+ top: 0;
}
.variants_table {
color: #999;
}
+.ghost {
+ display: none;
+}
+
+.ghost + .since {
+ position: initial;
+ display: table-cell;
+}
+.since + .srclink {
+ display: table-cell;
+ padding-left: 10px;
+}
/* Media Queries */
h2, h3:not(.impl):not(.method):not(.type):not(.tymethod), h4:not(.method):not(.type):not(.tymethod) {
border-bottom-color: #DDDDDD;
}
+.in-band, code {
+ background-color: white;
+}
.docblock code {
background-color: #F5F5F5;
// trait impls for private items should be stripped
clean::ImplItem(clean::Impl{
- for_: clean::ResolvedPath{ did, .. }, ..
+ for_: clean::ResolvedPath{ did, is_generic, .. }, ..
}) => {
- if did.is_local() && !self.access_levels.is_exported(did) {
+ if did.is_local() && !is_generic && !self.access_levels.is_exported(did) {
return None;
}
}
fn fold_item(&mut self, i: Item) -> Option<Item> {
if let clean::ImplItem(ref imp) = i.inner {
if let Some(did) = imp.for_.def_id() {
- if did.is_local() && !self.retained.contains(&did) {
+ if did.is_local() && !imp.for_.is_generic() &&
+ !self.retained.contains(&did)
+ {
return None;
}
}
use rustc::session::{self, config};
use rustc::session::config::{get_unstable_features_setting, OutputType};
use rustc::session::search_paths::{SearchPaths, PathKind};
-use rustc::hir::lowering::{lower_crate, LoweringContext};
+use rustc::hir::lowering::{lower_crate, LoweringContext, DummyResolver};
use rustc_back::dynamic_lib::DynamicLibrary;
use rustc_back::tempdir::TempDir;
use rustc_driver::{driver, Compilation};
let dep_graph = DepGraph::new(false);
let defs = &RefCell::new(hir_map::collect_definitions(&krate));
- let lcx = LoweringContext::new(&sess, Some(&krate), defs);
+ let mut dummy_resolver = DummyResolver;
+ let lcx = LoweringContext::new(&sess, Some(&krate), &mut dummy_resolver);
let krate = lower_crate(&lcx, &krate);
let opts = scrape_test_config(&krate);
prog.push_str(&everything_else);
} else {
prog.push_str("fn main() {\n ");
- prog.push_str(&everything_else.replace("\n", "\n "));
+ prog.push_str(&everything_else);
prog = prog.trim().into();
prog.push_str("\n}");
}
use rustc::hir::map as hir_map;
use rustc::hir::def::Def;
-use rustc::middle::stability;
use rustc::middle::privacy::AccessLevel;
use rustc::hir;
fn stability(&self, id: ast::NodeId) -> Option<attr::Stability> {
self.cx.tcx_opt().and_then(|tcx| {
self.cx.map.opt_local_def_id(id)
- .and_then(|def_id| stability::lookup_stability(tcx, def_id))
+ .and_then(|def_id| tcx.lookup_stability(def_id))
.cloned()
})
}
fn deprecation(&self, id: ast::NodeId) -> Option<attr::Deprecation> {
self.cx.tcx_opt().and_then(|tcx| {
self.cx.map.opt_local_def_id(id)
- .and_then(|def_id| stability::lookup_deprecation(tcx, def_id))
+ .and_then(|def_id| tcx.lookup_deprecation(def_id))
})
}
let mut mem_buf = string::String::new();
let mut encoder = Encoder::new(&mut mem_buf);
let result = hm.encode(&mut encoder);
- match result.err().unwrap() {
+ match result.unwrap_err() {
EncoderError::BadHashmapKey => (),
_ => panic!("expected bad hash map key")
}
alloc = { path = "../liballoc" }
alloc_jemalloc = { path = "../liballoc_jemalloc", optional = true }
alloc_system = { path = "../liballoc_system" }
+panic_unwind = { path = "../libpanic_unwind" }
+panic_abort = { path = "../libpanic_abort" }
collections = { path = "../libcollections" }
core = { path = "../libcore" }
libc = { path = "../rustc/libc_shim" }
rand = { path = "../librand" }
rustc_unicode = { path = "../librustc_unicode" }
+unwind = { path = "../libunwind" }
[build-dependencies]
build_helper = { path = "../build_helper" }
}
if target.contains("linux") {
- if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
- println!("cargo:rustc-link-lib=static=unwind");
- } else if target.contains("android") {
+ if target.contains("android") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=log");
println!("cargo:rustc-link-lib=gcc");
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=rt");
println!("cargo:rustc-link-lib=pthread");
- println!("cargo:rustc-link-lib=gcc_s");
}
} else if target.contains("freebsd") {
println!("cargo:rustc-link-lib=execinfo");
println!("cargo:rustc-link-lib=pthread");
- println!("cargo:rustc-link-lib=gcc_s");
} else if target.contains("dragonfly") || target.contains("bitrig") ||
target.contains("netbsd") || target.contains("openbsd") {
println!("cargo:rustc-link-lib=pthread");
-
- if target.contains("rumprun") {
- println!("cargo:rustc-link-lib=unwind");
- } else if target.contains("netbsd") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("openbsd") {
- println!("cargo:rustc-link-lib=gcc");
- } else if target.contains("bitrig") {
- println!("cargo:rustc-link-lib=c++abi");
- } else if target.contains("dragonfly") {
- println!("cargo:rustc-link-lib=gcc_pic");
- }
} else if target.contains("apple-darwin") {
println!("cargo:rustc-link-lib=System");
} else if target.contains("apple-ios") {
println!("cargo:rustc-link-lib=framework=Security");
println!("cargo:rustc-link-lib=framework=Foundation");
} else if target.contains("windows") {
- if target.contains("windows-gnu") {
- println!("cargo:rustc-link-lib=gcc_eh");
- }
println!("cargo:rustc-link-lib=advapi32");
println!("cargo:rustc-link-lib=ws2_32");
println!("cargo:rustc-link-lib=userenv");
let tmpdir = tmpdir();
let dir = &tmpdir.join("mkdir_error_twice");
check!(fs::create_dir(dir));
- let e = fs::create_dir(dir).err().unwrap();
+ let e = fs::create_dir(dir).unwrap_err();
assert_eq!(e.kind(), ErrorKind::AlreadyExists);
}
let mut writer = BufWriter::new(PanicWriter);
let _ = writer.write(b"hello world");
let _ = writer.flush();
- }).join().err().unwrap();
+ }).join().unwrap_err();
assert_eq!(WRITES.load(Ordering::SeqCst), 1);
}
#![feature(on_unimplemented)]
#![feature(oom)]
#![feature(optin_builtin_traits)]
+#![feature(panic_unwind)]
#![feature(placement_in_syntax)]
#![feature(rand)]
#![feature(raw)]
#![allow(unused_features)] // std may use features in a platform-specific way
#![cfg_attr(not(stage0), deny(warnings))]
+// FIXME(stage0): after a snapshot, move needs_panic_runtime up above and remove
+// this `extern crate` declaration and feature(panic_unwind)
+#![cfg_attr(not(stage0), needs_panic_runtime)]
+#![cfg_attr(not(stage0), feature(needs_panic_runtime))]
+#[cfg(stage0)]
+extern crate panic_unwind as __please_just_link_me_dont_reference_me;
+
#[cfg(test)] extern crate test;
// We want to reexport a few macros from core but libcore has already been
extern crate rustc_unicode;
extern crate libc;
+// We always need an unwinder currently for backtraces
+extern crate unwind;
+
#[cfg(stage0)]
extern crate alloc_system;
/// The entry point for panic of Rust threads.
///
/// This macro is used to inject panic into a Rust thread, causing the thread to
-/// unwind and panic entirely. Each thread's panic can be reaped as the
-/// `Box<Any>` type, and the single-argument form of the `panic!` macro will be
-/// the value which is transmitted.
+/// panic entirely. Each thread's panic can be reaped as the `Box<Any>` type,
+/// and the single-argument form of the `panic!` macro will be the value which
+/// is transmitted.
///
/// The multi-argument form of this macro panics with a string and has the
/// `format!` syntax for building a string.
panic!("explicit panic")
});
($msg:expr) => ({
- $crate::rt::begin_unwind($msg, {
+ $crate::rt::begin_panic($msg, {
// static requires less code at runtime, more constant data
static _FILE_LINE: (&'static str, u32) = (file!(), line!());
&_FILE_LINE
})
});
($fmt:expr, $($arg:tt)+) => ({
- $crate::rt::begin_unwind_fmt(format_args!($fmt, $($arg)+), {
+ $crate::rt::begin_panic_fmt(&format_args!($fmt, $($arg)+), {
// The leading _'s are to avoid dead code warnings if this is
// used inside a dead function. Just `#[allow(dead_code)]` is
// insufficient, since the user may have
use boxed::Box;
use cell::UnsafeCell;
use ops::{Deref, DerefMut};
+use panicking;
use ptr::{Unique, Shared};
use rc::Rc;
use sync::{Arc, Mutex, RwLock};
-use sys_common::unwind;
use thread::Result;
#[unstable(feature = "panic_handler", issue = "30449")]
/// ```
#[stable(feature = "catch_unwind", since = "1.9.0")]
pub fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
- let mut result = None;
unsafe {
- let result = &mut result;
- unwind::try(move || *result = Some(f()))?
+ panicking::try(f)
}
- Ok(result.unwrap())
}
/// Deprecated, renamed to `catch_unwind`
/// ```
#[stable(feature = "resume_unwind", since = "1.9.0")]
pub fn resume_unwind(payload: Box<Any + Send>) -> ! {
- unwind::rust_panic(payload)
+ panicking::rust_panic(payload)
}
/// Deprecated, use resume_unwind instead
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! Implementation of various bits and pieces of the `panic!` macro and
+//! associated runtime pieces.
+//!
+//! Specifically, this module contains the implementation of:
+//!
+//! * Panic hooks
+//! * Executing a panic, up to delegating to the actual panic-runtime implementation
+//! * Shims around "try"
+
use prelude::v1::*;
use io::prelude::*;
use any::Any;
use cell::Cell;
use cell::RefCell;
+use fmt;
use intrinsics;
+use mem;
+use raw;
use sync::StaticRwLock;
use sync::atomic::{AtomicBool, Ordering};
use sys::stdio::Stderr;
use sys_common::util;
use thread;
-thread_local! { pub static PANIC_COUNT: Cell<usize> = Cell::new(0) }
-
thread_local! {
pub static LOCAL_STDERR: RefCell<Option<Box<Write + Send>>> = {
RefCell::new(None)
}
}
+thread_local! { pub static PANIC_COUNT: Cell<usize> = Cell::new(0) }
+
+// Binary interface to the panic runtime that the standard library depends on.
+//
+// The standard library is tagged with `#![needs_panic_runtime]` (introduced in
+// RFC 1513) to indicate that it requires some other crate tagged with
+// `#![panic_runtime]` to exist somewhere. Each panic runtime is intended to
+// implement these symbols (with the same signatures) so we can get matched up
+// to them.
+//
+// One day this may look a little less ad-hoc with the compiler helping out to
+// hook up these functions, but it is not this day!
+#[allow(improper_ctypes)]
+extern {
+ fn __rust_maybe_catch_panic(f: fn(*mut u8),
+ data: *mut u8,
+ data_ptr: *mut usize,
+ vtable_ptr: *mut usize) -> u32;
+ #[unwind]
+ fn __rust_start_panic(data: usize, vtable: usize) -> u32;
+}
+
#[derive(Copy, Clone)]
enum Hook {
Default,
/// # Panics
///
/// Panics if called from a panicking thread.
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+#[unstable(feature = "panic_handler", issue = "30449")]
pub fn set_hook(hook: Box<Fn(&PanicInfo) + 'static + Sync + Send>) {
if thread::panicking() {
panic!("cannot modify the panic hook from a panicking thread");
/// # Panics
///
/// Panics if called from a panicking thread.
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+#[unstable(feature = "panic_handler", issue = "30449")]
pub fn take_hook() -> Box<Fn(&PanicInfo) + 'static + Sync + Send> {
if thread::panicking() {
panic!("cannot modify the panic hook from a panicking thread");
}
/// A struct providing information about a panic.
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+#[unstable(feature = "panic_handler", issue = "30449")]
pub struct PanicInfo<'a> {
payload: &'a (Any + Send),
location: Location<'a>,
/// Returns the payload associated with the panic.
///
/// This will commonly, but not always, be a `&'static str` or `String`.
- #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+ #[unstable(feature = "panic_handler", issue = "30449")]
pub fn payload(&self) -> &(Any + Send) {
self.payload
}
///
/// This method will currently always return `Some`, but this may change
/// in future versions.
- #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+ #[unstable(feature = "panic_handler", issue = "30449")]
pub fn location(&self) -> Option<&Location> {
Some(&self.location)
}
}
/// A struct containing information about the location of a panic.
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+#[unstable(feature = "panic_handler", issue = "30449")]
pub struct Location<'a> {
file: &'a str,
line: u32,
impl<'a> Location<'a> {
/// Returns the name of the source file from which the panic originated.
- #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+ #[unstable(feature = "panic_handler", issue = "30449")]
pub fn file(&self) -> &str {
self.file
}
/// Returns the line number from which the panic originated.
- #[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
+ #[unstable(feature = "panic_handler", issue = "30449")]
pub fn line(&self) -> u32 {
self.line
}
}
fn default_hook(info: &PanicInfo) {
- let panics = PANIC_COUNT.with(|s| s.get());
+ let panics = PANIC_COUNT.with(|c| c.get());
// If this is a double panic, make sure that we print a backtrace
// for this panic. Otherwise only print it if logging is enabled.
}
}
-pub fn on_panic(obj: &(Any+Send), file: &'static str, line: u32) {
- let panics = PANIC_COUNT.with(|s| {
- let count = s.get() + 1;
- s.set(count);
- count
+/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
+pub unsafe fn try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<Any + Send>> {
+ let mut slot = None;
+ let mut f = Some(f);
+ let ret = PANIC_COUNT.with(|s| {
+ let prev = s.get();
+ s.set(0);
+
+ let mut to_run = || {
+ slot = Some(f.take().unwrap()());
+ };
+ let fnptr = get_call(&mut to_run);
+ let dataptr = &mut to_run as *mut _ as *mut u8;
+ let mut any_data = 0;
+ let mut any_vtable = 0;
+ let fnptr = mem::transmute::<fn(&mut _), fn(*mut u8)>(fnptr);
+ let r = __rust_maybe_catch_panic(fnptr,
+ dataptr,
+ &mut any_data,
+ &mut any_vtable);
+ s.set(prev);
+
+ if r == 0 {
+ Ok(())
+ } else {
+ Err(mem::transmute(raw::TraitObject {
+ data: any_data as *mut _,
+ vtable: any_vtable as *mut _,
+ }))
+ }
+ });
+
+ return ret.map(|()| {
+ slot.take().unwrap()
});
- // If this is the third nested call, on_panic triggered the last panic,
- // otherwise the double-panic check would have aborted the process.
- // Even if it is likely that on_panic was unable to log the backtrace,
- // abort immediately to avoid infinite recursion, so that attaching a
- // debugger provides a useable stacktrace.
- if panics >= 3 {
+ fn get_call<F: FnMut()>(_: &mut F) -> fn(&mut F) {
+ call
+ }
+
+ fn call<F: FnMut()>(f: &mut F) {
+ f()
+ }
+}
+
+/// Determines whether the current thread is unwinding because of a panic.
+pub fn panicking() -> bool {
+ PANIC_COUNT.with(|c| c.get() != 0)
+}
+
+/// Entry point of panic from the libcore crate.
+#[cfg(not(test))]
+#[lang = "panic_fmt"]
+#[unwind]
+pub extern fn rust_begin_panic(msg: fmt::Arguments,
+ file: &'static str,
+ line: u32) -> ! {
+ begin_panic_fmt(&msg, &(file, line))
+}
+
+/// The entry point for panicking with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as low an impact
+/// on (e.g.) the inlining of other functions as possible), by moving
+/// the actual formatting into this shared place.
+#[unstable(feature = "libstd_sys_internals",
+ reason = "used by the panic! macro",
+ issue = "0")]
+#[inline(never)] #[cold]
+pub fn begin_panic_fmt(msg: &fmt::Arguments,
+ file_line: &(&'static str, u32)) -> ! {
+ use fmt::Write;
+
+ // We do two allocations here, unfortunately. But (a) they're
+ // required with the current scheme, and (b) we don't handle
+ // panic + OOM properly anyway (see comment in begin_panic
+ // below).
+
+ let mut s = String::new();
+ let _ = s.write_fmt(*msg);
+ begin_panic(s, file_line)
+}
+
+/// This is the entry point of panicking for panic!() and assert!().
+#[unstable(feature = "libstd_sys_internals",
+ reason = "used by the panic! macro",
+ issue = "0")]
+#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
+pub fn begin_panic<M: Any + Send>(msg: M, file_line: &(&'static str, u32)) -> ! {
+ // Note that this should be the only allocation performed in this code path.
+ // Currently this means that panic!() on OOM will invoke this code path,
+ // but then again we're not really ready for panic on OOM anyway. If
+ // we do start doing this, then we should propagate this allocation to
+ // be performed in the parent of this thread instead of the thread that's
+ // panicking.
+
+ rust_panic_with_hook(Box::new(msg), file_line)
+}
+
+/// Executes the primary logic for a panic, including checking for recursive
+/// panics and panic hooks.
+///
+/// This is the entry point for panics from libcore, formatted panics, and
+/// `Box<Any>` panics. Here we'll verify that we're not panicking recursively,
+/// run panic hooks, and then delegate to the actual implementation of panics.
+#[inline(never)]
+#[cold]
+fn rust_panic_with_hook(msg: Box<Any + Send>,
+ file_line: &(&'static str, u32)) -> ! {
+ let (file, line) = *file_line;
+
+ let panics = PANIC_COUNT.with(|c| {
+ let prev = c.get();
+ c.set(prev + 1);
+ prev
+ });
+
+ // If this is the third nested call (e.g. panics == 2, this is 0-indexed),
+ // the panic hook probably triggered the last panic, otherwise the
+ // double-panic check would have aborted the process. In this case abort the
+ // process real quickly as we don't want to try calling it again as it'll
+ // probably just panic again.
+ if panics > 1 {
util::dumb_print(format_args!("thread panicked while processing \
panic. aborting.\n"));
unsafe { intrinsics::abort() }
}
- let info = PanicInfo {
- payload: obj,
- location: Location {
- file: file,
- line: line,
- },
- };
-
unsafe {
+ let info = PanicInfo {
+ payload: &*msg,
+ location: Location {
+ file: file,
+ line: line,
+ },
+ };
let _lock = HOOK_LOCK.read();
match HOOK {
Hook::Default => default_hook(&info),
}
}
- if panics >= 2 {
+ if panics > 0 {
// If a thread panics while it's already unwinding then we
// have limited options. Currently our preference is to
// just abort. In the future we may consider resuming
aborting.\n"));
unsafe { intrinsics::abort() }
}
+
+ rust_panic(msg)
+}
+
+/// A private no-mangle function on which to slap yer breakpoints.
+#[no_mangle]
+#[allow(private_no_mangle_fns)] // yes we get it, but we like breakpoints
+pub fn rust_panic(msg: Box<Any + Send>) -> ! {
+ let code = unsafe {
+ let obj = mem::transmute::<_, raw::TraitObject>(msg);
+ __rust_start_panic(obj.data as usize, obj.vtable as usize)
+ };
+ rtabort!("failed to initiate panic, error {}", code)
}
fn as_inner(&self) -> &imp::ExitStatus { &self.0 }
}
+impl FromInner<imp::ExitStatus> for ExitStatus {
+ fn from_inner(s: imp::ExitStatus) -> ExitStatus {
+ ExitStatus(s)
+ }
+}
+
#[stable(feature = "process", since = "1.0.0")]
impl fmt::Display for ExitStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Reexport some of our utilities which are expected by other crates.
-pub use sys_common::unwind::{begin_unwind, begin_unwind_fmt};
+pub use panicking::{begin_panic, begin_panic_fmt};
-// Rust runtime's startup objects depend on these symbols, so they must be public.
-// Since sys_common isn't public, we have to re-export them here.
-#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
-pub use sys_common::unwind::imp::eh_frame_registry::*;
+#[cfg(stage0)]
+pub use panicking::begin_panic as begin_unwind;
#[cfg(not(test))]
#[lang = "start"]
///
/// // This send will fail because the receiver is gone
/// drop(rx);
- /// assert_eq!(tx.send(1).err().unwrap().0, 1);
+ /// assert_eq!(tx.send(1).unwrap_err().0, 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Parsing of GCC-style Language-Specific Data Area (LSDA)
-//! For details see:
-//! http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
-//! http://mentorembedded.github.io/cxx-abi/exceptions.pdf
-//! http://www.airs.com/blog/archives/460
-//! http://www.airs.com/blog/archives/464
-//!
-//! A reference implementation may be found in the GCC source tree
-//! (<root>/libgcc/unwind-c.c as of this writing)
-
-#![allow(non_upper_case_globals)]
-#![allow(unused)]
-
-use prelude::v1::*;
-use sys_common::dwarf::DwarfReader;
-use core::mem;
-
-pub const DW_EH_PE_omit : u8 = 0xFF;
-pub const DW_EH_PE_absptr : u8 = 0x00;
-
-pub const DW_EH_PE_uleb128 : u8 = 0x01;
-pub const DW_EH_PE_udata2 : u8 = 0x02;
-pub const DW_EH_PE_udata4 : u8 = 0x03;
-pub const DW_EH_PE_udata8 : u8 = 0x04;
-pub const DW_EH_PE_sleb128 : u8 = 0x09;
-pub const DW_EH_PE_sdata2 : u8 = 0x0A;
-pub const DW_EH_PE_sdata4 : u8 = 0x0B;
-pub const DW_EH_PE_sdata8 : u8 = 0x0C;
-
-pub const DW_EH_PE_pcrel : u8 = 0x10;
-pub const DW_EH_PE_textrel : u8 = 0x20;
-pub const DW_EH_PE_datarel : u8 = 0x30;
-pub const DW_EH_PE_funcrel : u8 = 0x40;
-pub const DW_EH_PE_aligned : u8 = 0x50;
-
-pub const DW_EH_PE_indirect : u8 = 0x80;
-
-#[derive(Copy, Clone)]
-pub struct EHContext {
- pub ip: usize, // Current instruction pointer
- pub func_start: usize, // Address of the current function
- pub text_start: usize, // Address of the code section
- pub data_start: usize, // Address of the data section
-}
-
-pub unsafe fn find_landing_pad(lsda: *const u8, context: &EHContext)
- -> Option<usize> {
- if lsda.is_null() {
- return None;
- }
-
- let func_start = context.func_start;
- let mut reader = DwarfReader::new(lsda);
-
- let start_encoding = reader.read::<u8>();
- // base address for landing pad offsets
- let lpad_base = if start_encoding != DW_EH_PE_omit {
- read_encoded_pointer(&mut reader, context, start_encoding)
- } else {
- func_start
- };
-
- let ttype_encoding = reader.read::<u8>();
- if ttype_encoding != DW_EH_PE_omit {
- // Rust doesn't analyze exception types, so we don't care about the type table
- reader.read_uleb128();
- }
-
- let call_site_encoding = reader.read::<u8>();
- let call_site_table_length = reader.read_uleb128();
- let action_table = reader.ptr.offset(call_site_table_length as isize);
- // Return addresses point 1 byte past the call instruction, which could
- // be in the next IP range.
- let ip = context.ip-1;
-
- while reader.ptr < action_table {
- let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding);
- let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding);
- let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding);
- let cs_action = reader.read_uleb128();
- // Callsite table is sorted by cs_start, so if we've passed the ip, we
- // may stop searching.
- if ip < func_start + cs_start {
- break
- }
- if ip < func_start + cs_start + cs_len {
- if cs_lpad != 0 {
- return Some(lpad_base + cs_lpad);
- } else {
- return None;
- }
- }
- }
- // IP range not found: gcc's C++ personality calls terminate() here,
- // however the rest of the languages treat this the same as cs_lpad == 0.
- // We follow this suit.
- None
-}
-
-#[inline]
-fn round_up(unrounded: usize, align: usize) -> usize {
- assert!(align.is_power_of_two());
- (unrounded + align - 1) & !(align - 1)
-}
-
-unsafe fn read_encoded_pointer(reader: &mut DwarfReader,
- context: &EHContext,
- encoding: u8) -> usize {
- assert!(encoding != DW_EH_PE_omit);
-
- // DW_EH_PE_aligned implies it's an absolute pointer value
- if encoding == DW_EH_PE_aligned {
- reader.ptr = round_up(reader.ptr as usize,
- mem::size_of::<usize>()) as *const u8;
- return reader.read::<usize>();
- }
-
- let mut result = match encoding & 0x0F {
- DW_EH_PE_absptr => reader.read::<usize>(),
- DW_EH_PE_uleb128 => reader.read_uleb128() as usize,
- DW_EH_PE_udata2 => reader.read::<u16>() as usize,
- DW_EH_PE_udata4 => reader.read::<u32>() as usize,
- DW_EH_PE_udata8 => reader.read::<u64>() as usize,
- DW_EH_PE_sleb128 => reader.read_sleb128() as usize,
- DW_EH_PE_sdata2 => reader.read::<i16>() as usize,
- DW_EH_PE_sdata4 => reader.read::<i32>() as usize,
- DW_EH_PE_sdata8 => reader.read::<i64>() as usize,
- _ => panic!()
- };
-
- result += match encoding & 0x70 {
- DW_EH_PE_absptr => 0,
- // relative to address of the encoded value, despite the name
- DW_EH_PE_pcrel => reader.ptr as usize,
- DW_EH_PE_textrel => { assert!(context.text_start != 0);
- context.text_start },
- DW_EH_PE_datarel => { assert!(context.data_start != 0);
- context.data_start },
- DW_EH_PE_funcrel => { assert!(context.func_start != 0);
- context.func_start },
- _ => panic!()
- };
-
- if encoding & DW_EH_PE_indirect != 0 {
- result = *(result as *const usize);
- }
-
- result
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Utilities for parsing DWARF-encoded data streams.
-//! See http://www.dwarfstd.org,
-//! DWARF-4 standard, Section 7 - "Data Representation"
-
-// This module is used only by x86_64-pc-windows-gnu for now, but we
-// are compiling it everywhere to avoid regressions.
-#![allow(unused)]
-
-pub mod eh;
-
-use prelude::v1::*;
-use core::mem;
-
-pub struct DwarfReader {
- pub ptr : *const u8
-}
-
-#[repr(C,packed)]
-struct Unaligned<T>(T);
-
-impl DwarfReader {
-
- pub fn new(ptr : *const u8) -> DwarfReader {
- DwarfReader {
- ptr : ptr
- }
- }
-
- // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned
- // on a 4-byte boundary. This may cause problems on platforms with strict
- // alignment requirements. By wrapping data in a "packed" struct, we are
- // telling the backend to generate "misalignment-safe" code.
- pub unsafe fn read<T:Copy>(&mut self) -> T {
- let Unaligned(result) = *(self.ptr as *const Unaligned<T>);
- self.ptr = self.ptr.offset(mem::size_of::<T>() as isize);
- result
- }
-
- // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable
- // Length Data".
- pub unsafe fn read_uleb128(&mut self) -> u64 {
- let mut shift : usize = 0;
- let mut result : u64 = 0;
- let mut byte : u8;
- loop {
- byte = self.read::<u8>();
- result |= ((byte & 0x7F) as u64) << shift;
- shift += 7;
- if byte & 0x80 == 0 {
- break;
- }
- }
- result
- }
-
- pub unsafe fn read_sleb128(&mut self) -> i64 {
- let mut shift : usize = 0;
- let mut result : u64 = 0;
- let mut byte : u8;
- loop {
- byte = self.read::<u8>();
- result |= ((byte & 0x7F) as u64) << shift;
- shift += 7;
- if byte & 0x80 == 0 {
- break;
- }
- }
- // sign-extend
- if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
- result |= (!0 as u64) << shift;
- }
- result as i64
- }
-}
-
-#[test]
-fn dwarf_reader() {
- let encoded: &[u8] = &[1,
- 2, 3,
- 4, 5, 6, 7,
- 0xE5, 0x8E, 0x26,
- 0x9B, 0xF1, 0x59,
- 0xFF, 0xFF];
-
- let mut reader = DwarfReader::new(encoded.as_ptr());
-
- unsafe {
- assert!(reader.read::<u8>() == u8::to_be(1u8));
- assert!(reader.read::<u16>() == u16::to_be(0x0203));
- assert!(reader.read::<u32>() == u32::to_be(0x04050607));
-
- assert!(reader.read_uleb128() == 624485);
- assert!(reader.read_sleb128() == -624485);
-
- assert!(reader.read::<i8>() == i8::to_be(-1));
- }
-}
+++ /dev/null
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Unwind library interface
-
-#![allow(non_upper_case_globals)]
-#![allow(non_camel_case_types)]
-#![allow(non_snake_case)]
-#![allow(dead_code)] // these are just bindings
-
-#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
-pub use self::_Unwind_Action::*;
-#[cfg(target_arch = "arm")]
-pub use self::_Unwind_State::*;
-pub use self::_Unwind_Reason_Code::*;
-
-use libc;
-
-#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub enum _Unwind_Action {
- _UA_SEARCH_PHASE = 1,
- _UA_CLEANUP_PHASE = 2,
- _UA_HANDLER_FRAME = 4,
- _UA_FORCE_UNWIND = 8,
- _UA_END_OF_STACK = 16,
-}
-
-#[cfg(target_arch = "arm")]
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub enum _Unwind_State {
- _US_VIRTUAL_UNWIND_FRAME = 0,
- _US_UNWIND_FRAME_STARTING = 1,
- _US_UNWIND_FRAME_RESUME = 2,
- _US_ACTION_MASK = 3,
- _US_FORCE_UNWIND = 8,
- _US_END_OF_STACK = 16
-}
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub enum _Unwind_Reason_Code {
- _URC_NO_REASON = 0,
- _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
- _URC_FATAL_PHASE2_ERROR = 2,
- _URC_FATAL_PHASE1_ERROR = 3,
- _URC_NORMAL_STOP = 4,
- _URC_END_OF_STACK = 5,
- _URC_HANDLER_FOUND = 6,
- _URC_INSTALL_CONTEXT = 7,
- _URC_CONTINUE_UNWIND = 8,
- _URC_FAILURE = 9, // used only by ARM EABI
-}
-
-pub type _Unwind_Exception_Class = u64;
-
-pub type _Unwind_Word = libc::uintptr_t;
-
-#[cfg(target_arch = "x86")]
-pub const unwinder_private_data_size: usize = 5;
-
-#[cfg(target_arch = "x86_64")]
-pub const unwinder_private_data_size: usize = 6;
-
-#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
-pub const unwinder_private_data_size: usize = 20;
-
-#[cfg(all(target_arch = "arm", target_os = "ios"))]
-pub const unwinder_private_data_size: usize = 5;
-
-#[cfg(target_arch = "aarch64")]
-pub const unwinder_private_data_size: usize = 2;
-
-#[cfg(target_arch = "mips")]
-pub const unwinder_private_data_size: usize = 2;
-
-#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
-pub const unwinder_private_data_size: usize = 2;
-
-#[cfg(target_arch = "asmjs")]
-// FIXME: Copied from arm. Need to confirm.
-pub const unwinder_private_data_size: usize = 20;
-
-#[repr(C)]
-pub struct _Unwind_Exception {
- pub exception_class: _Unwind_Exception_Class,
- pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
- pub private: [_Unwind_Word; unwinder_private_data_size],
-}
-
-pub enum _Unwind_Context {}
-
-pub type _Unwind_Exception_Cleanup_Fn =
- extern "C" fn(unwind_code: _Unwind_Reason_Code,
- exception: *mut _Unwind_Exception);
-
-#[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
- target_os = "freebsd",
- target_os = "solaris",
- all(target_os = "linux",
- target_env = "musl",
- not(target_arch = "x86"),
- not(target_arch = "x86_64"))),
- link(name = "gcc_s"))]
-#[cfg_attr(all(target_os = "linux",
- target_env = "musl",
- any(target_arch = "x86", target_arch = "x86_64"),
- not(test)),
- link(name = "unwind", kind = "static"))]
-#[cfg_attr(any(target_os = "android", target_os = "openbsd"),
- link(name = "gcc"))]
-#[cfg_attr(all(target_os = "netbsd", not(target_vendor = "rumprun")),
- link(name = "gcc"))]
-#[cfg_attr(all(target_os = "netbsd", target_vendor = "rumprun"),
- link(name = "unwind"))]
-#[cfg_attr(target_os = "dragonfly",
- link(name = "gcc_pic"))]
-#[cfg_attr(target_os = "bitrig",
- link(name = "c++abi"))]
-#[cfg_attr(all(target_os = "windows", target_env="gnu"),
- link(name = "gcc_eh"))]
-extern "C" {
- // iOS on armv7 uses SjLj exceptions and requires to link
- // against corresponding routine (..._SjLj_...)
- #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
- #[unwind]
- pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception)
- -> _Unwind_Reason_Code;
-
- #[cfg(all(target_os = "ios", target_arch = "arm"))]
- #[unwind]
- fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception)
- -> _Unwind_Reason_Code;
-
- pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
-
- #[unwind]
- pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
-}
-
-// ... and now we just providing access to SjLj counterspart
-// through a standard name to hide those details from others
-// (see also comment above regarding _Unwind_RaiseException)
-#[cfg(all(target_os = "ios", target_arch = "arm"))]
-#[inline(always)]
-pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception)
- -> _Unwind_Reason_Code {
- _Unwind_SjLj_RaiseException(exc)
-}
pub mod at_exit_imp;
pub mod backtrace;
pub mod condvar;
-pub mod dwarf;
pub mod io;
-pub mod libunwind;
pub mod mutex;
pub mod net;
pub mod poison;
pub mod thread;
pub mod thread_info;
pub mod thread_local;
-pub mod unwind;
pub mod util;
pub mod wtf8;
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(private_no_mangle_fns)]
-
-use prelude::v1::*;
-
-use any::Any;
-use sys_common::libunwind as uw;
-
-struct Exception {
- uwe: uw::_Unwind_Exception,
- cause: Option<Box<Any + Send + 'static>>,
-}
-
-pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
- let exception: Box<_> = box Exception {
- uwe: uw::_Unwind_Exception {
- exception_class: rust_exception_class(),
- exception_cleanup: exception_cleanup,
- private: [0; uw::unwinder_private_data_size],
- },
- cause: Some(data),
- };
- let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception;
- let error = uw::_Unwind_RaiseException(exception_param);
- rtabort!("Could not unwind stack, error = {}", error as isize);
-
- extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
- exception: *mut uw::_Unwind_Exception) {
- unsafe {
- let _: Box<Exception> = Box::from_raw(exception as *mut Exception);
- }
- }
-}
-
-pub fn payload() -> *mut u8 {
- 0 as *mut u8
-}
-
-pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
- let my_ep = ptr as *mut Exception;
- let cause = (*my_ep).cause.take();
- uw::_Unwind_DeleteException(ptr as *mut _);
- cause.unwrap()
-}
-
-// Rust's exception class identifier. This is used by personality routines to
-// determine whether the exception was thrown by their own runtime.
-fn rust_exception_class() -> uw::_Unwind_Exception_Class {
- // M O Z \0 R U S T -- vendor, language
- 0x4d4f5a_00_52555354
-}
-
-// We could implement our personality routine in pure Rust, however exception
-// info decoding is tedious. More importantly, personality routines have to
-// handle various platform quirks, which are not fun to maintain. For this
-// reason, we attempt to reuse personality routine of the C language:
-// __gcc_personality_v0.
-//
-// Since C does not support exception catching, __gcc_personality_v0 simply
-// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
-// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
-//
-// This is pretty close to Rust's exception handling approach, except that Rust
-// does have a single "catch-all" handler at the bottom of each thread's stack.
-// So we have two versions of the personality routine:
-// - rust_eh_personality, used by all cleanup landing pads, which never catches,
-// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
-// - rust_eh_personality_catch, used only by rust_try(), which always catches.
-//
-// See also: rustc_trans::trans::intrinsic::trans_gnu_try
-
-#[cfg(all(not(target_arch = "arm"),
- not(all(windows, target_arch = "x86_64")),
- not(test)))]
-pub mod eabi {
- use sys_common::libunwind as uw;
- use libc::c_int;
-
- extern {
- fn __gcc_personality_v0(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code;
- }
-
- #[lang = "eh_personality"]
- #[no_mangle]
- extern fn rust_eh_personality(
- version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- unsafe {
- __gcc_personality_v0(version, actions, exception_class, ue_header,
- context)
- }
- }
-
- #[lang = "eh_personality_catch"]
- #[no_mangle]
- pub extern fn rust_eh_personality_catch(
- version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
-
- if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
- uw::_URC_HANDLER_FOUND // catch!
- }
- else { // cleanup phase
- unsafe {
- __gcc_personality_v0(version, actions, exception_class, ue_header,
- context)
- }
- }
- }
-}
-
-// iOS on armv7 is using SjLj exceptions and therefore requires to use
-// a specialized personality routine: __gcc_personality_sj0
-
-#[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
-pub mod eabi {
- use sys_common::libunwind as uw;
- use libc::c_int;
-
- extern {
- fn __gcc_personality_sj0(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code;
- }
-
- #[lang = "eh_personality"]
- #[no_mangle]
- pub extern fn rust_eh_personality(
- version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- unsafe {
- __gcc_personality_sj0(version, actions, exception_class, ue_header,
- context)
- }
- }
-
- #[lang = "eh_personality_catch"]
- #[no_mangle]
- pub extern fn rust_eh_personality_catch(
- version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
- uw::_URC_HANDLER_FOUND // catch!
- }
- else { // cleanup phase
- unsafe {
- __gcc_personality_sj0(version, actions, exception_class, ue_header,
- context)
- }
- }
- }
-}
-
-
-// ARM EHABI uses a slightly different personality routine signature,
-// but otherwise works the same.
-#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
-pub mod eabi {
- use sys_common::libunwind as uw;
- use libc::c_int;
-
- extern {
- fn __gcc_personality_v0(state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code;
- }
-
- #[lang = "eh_personality"]
- #[no_mangle]
- extern fn rust_eh_personality(
- state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- unsafe {
- __gcc_personality_v0(state, ue_header, context)
- }
- }
-
- #[lang = "eh_personality_catch"]
- #[no_mangle]
- pub extern fn rust_eh_personality_catch(
- state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context
- ) -> uw::_Unwind_Reason_Code
- {
- // Backtraces on ARM will call the personality routine with
- // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
- // we want to continue unwinding the stack, otherwise all our backtraces
- // would end at __rust_try.
- if (state as c_int & uw::_US_ACTION_MASK as c_int)
- == uw::_US_VIRTUAL_UNWIND_FRAME as c_int
- && (state as c_int & uw::_US_FORCE_UNWIND as c_int) == 0 { // search phase
- uw::_URC_HANDLER_FOUND // catch!
- }
- else { // cleanup phase
- unsafe {
- __gcc_personality_v0(state, ue_header, context)
- }
- }
- }
-}
-
-// See docs in the `unwind` module.
-#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu", not(test)))]
-#[lang = "eh_unwind_resume"]
-#[unwind]
-unsafe extern fn rust_eh_unwind_resume(panic_ctx: *mut u8) -> ! {
- uw::_Unwind_Resume(panic_ctx as *mut uw::_Unwind_Exception);
-}
-
-#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
-pub mod eh_frame_registry {
- // The implementation of stack unwinding is (for now) deferred to libgcc_eh, however Rust
- // crates use these Rust-specific entry points to avoid potential clashes with GCC runtime.
- // See also: rtbegin.rs, `unwind` module.
-
- #[link(name = "gcc_eh")]
- #[cfg(not(cargobuild))]
- extern {}
-
- extern {
- fn __register_frame_info(eh_frame_begin: *const u8, object: *mut u8);
- fn __deregister_frame_info(eh_frame_begin: *const u8, object: *mut u8);
- }
- #[cfg(not(test))]
- #[no_mangle]
- #[unstable(feature = "libstd_sys_internals", issue = "0")]
- pub unsafe extern fn rust_eh_register_frames(eh_frame_begin: *const u8,
- object: *mut u8) {
- __register_frame_info(eh_frame_begin, object);
- }
- #[cfg(not(test))]
- #[no_mangle]
- #[unstable(feature = "libstd_sys_internals", issue = "0")]
- pub unsafe extern fn rust_eh_unregister_frames(eh_frame_begin: *const u8,
- object: *mut u8) {
- __deregister_frame_info(eh_frame_begin, object);
- }
-}
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Implementation of Rust stack unwinding
-//!
-//! For background on exception handling and stack unwinding please see
-//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
-//! documents linked from it.
-//! These are also good reads:
-//! http://mentorembedded.github.io/cxx-abi/abi-eh.html
-//! http://monoinfinito.wordpress.com/series/exception-handling-in-c/
-//! http://www.airs.com/blog/index.php?s=exception+frames
-//!
-//! ## A brief summary
-//!
-//! Exception handling happens in two phases: a search phase and a cleanup phase.
-//!
-//! In both phases the unwinder walks stack frames from top to bottom using
-//! information from the stack frame unwind sections of the current process's
-//! modules ("module" here refers to an OS module, i.e. an executable or a
-//! dynamic library).
-//!
-//! For each stack frame, it invokes the associated "personality routine", whose
-//! address is also stored in the unwind info section.
-//!
-//! In the search phase, the job of a personality routine is to examine exception
-//! object being thrown, and to decide whether it should be caught at that stack
-//! frame. Once the handler frame has been identified, cleanup phase begins.
-//!
-//! In the cleanup phase, the unwinder invokes each personality routine again.
-//! This time it decides which (if any) cleanup code needs to be run for
-//! the current stack frame. If so, the control is transferred to a special branch
-//! in the function body, the "landing pad", which invokes destructors, frees memory,
-//! etc. At the end of the landing pad, control is transferred back to the unwinder
-//! and unwinding resumes.
-//!
-//! Once stack has been unwound down to the handler frame level, unwinding stops
-//! and the last personality routine transfers control to the catch block.
-//!
-//! ## `eh_personality` and `eh_unwind_resume`
-//!
-//! These language items are used by the compiler when generating unwind info.
-//! The first one is the personality routine described above. The second one
-//! allows compilation target to customize the process of resuming unwind at the
-//! end of the landing pads. `eh_unwind_resume` is used only if `custom_unwind_resume`
-//! flag in the target options is set.
-//!
-//! ## Frame unwind info registration
-//!
-//! Each module's image contains a frame unwind info section (usually ".eh_frame").
-//! When a module is loaded/unloaded into the process, the unwinder must be informed
-//! about the location of this section in memory. The methods of achieving that vary
-//! by the platform.
-//! On some (e.g. Linux), the unwinder can discover unwind info sections on its own
-//! (by dynamically enumerating currently loaded modules via the dl_iterate_phdr() API
-//! and finding their ".eh_frame" sections);
-//! Others, like Windows, require modules to actively register their unwind info
-//! sections via unwinder API (see `rust_eh_register_frames`/`rust_eh_unregister_frames`).
-
-#![allow(dead_code)]
-#![allow(unused_imports)]
-
-use prelude::v1::*;
-
-use any::Any;
-use boxed;
-use cmp;
-use panicking::{self,PANIC_COUNT};
-use fmt;
-use intrinsics;
-use mem;
-use sync::atomic::{self, Ordering};
-use sys_common::mutex::Mutex;
-
-// The actual unwinding implementation is cfg'd here, and we've got two current
-// implementations. One goes through SEH on Windows and the other goes through
-// libgcc via the libunwind-like API.
-
-// *-pc-windows-msvc
-#[cfg(target_env = "msvc")]
-#[path = "seh.rs"] #[doc(hidden)]
-pub mod imp;
-
-// x86_64-pc-windows-gnu
-#[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))]
-#[path = "seh64_gnu.rs"] #[doc(hidden)]
-pub mod imp;
-
-// i686-pc-windows-gnu and all others
-#[cfg(any(unix, all(windows, target_arch = "x86", target_env = "gnu")))]
-#[path = "gcc.rs"] #[doc(hidden)]
-pub mod imp;
-
-/// Invoke a closure, capturing the cause of panic if one occurs.
-///
-/// This function will return `Ok(())` if the closure did not panic, and will
-/// return `Err(cause)` if the closure panics. The `cause` returned is the
-/// object with which panic was originally invoked.
-///
-/// This function also is unsafe for a variety of reasons:
-///
-/// * This is not safe to call in a nested fashion. The unwinding
-/// interface for Rust is designed to have at most one try/catch block per
-/// thread, not multiple. No runtime checking is currently performed to uphold
-/// this invariant, so this function is not safe. A nested try/catch block
-/// may result in corruption of the outer try/catch block's state, especially
-/// if this is used within a thread itself.
-///
-/// * It is not sound to trigger unwinding while already unwinding. Rust threads
-/// have runtime checks in place to ensure this invariant, but it is not
-/// guaranteed that a rust thread is in place when invoking this function.
-/// Unwinding twice can lead to resource leaks where some destructors are not
-/// run.
-pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
- let mut f = Some(f);
- return inner_try(try_fn::<F>, &mut f as *mut _ as *mut u8);
-
- fn try_fn<F: FnOnce()>(opt_closure: *mut u8) {
- let opt_closure = opt_closure as *mut Option<F>;
- unsafe { (*opt_closure).take().unwrap()(); }
- }
-}
-
-unsafe fn inner_try(f: fn(*mut u8), data: *mut u8)
- -> Result<(), Box<Any + Send>> {
- PANIC_COUNT.with(|s| {
- let prev = s.get();
- s.set(0);
-
- // The "payload" here is a platform-specific region of memory which is
- // used to transmit information about the exception being thrown from
- // the point-of-throw back to this location.
- //
- // A pointer to this data is passed to the `try` intrinsic itself,
- // allowing this function, the `try` intrinsic, imp::payload(), and
- // imp::cleanup() to all work in concert to transmit this information.
- //
- // More information about what this pointer actually is can be found in
- // each implementation as well as browsing the compiler source itself.
- let mut payload = imp::payload();
- let r = intrinsics::try(f, data, &mut payload as *mut _ as *mut _);
- s.set(prev);
- if r == 0 {
- Ok(())
- } else {
- Err(imp::cleanup(payload))
- }
- })
-}
-
-/// Determines whether the current thread is unwinding because of panic.
-pub fn panicking() -> bool {
- PANIC_COUNT.with(|s| s.get() != 0)
-}
-
-// An uninlined, unmangled function upon which to slap yer breakpoints
-#[inline(never)]
-#[no_mangle]
-#[allow(private_no_mangle_fns)]
-pub fn rust_panic(cause: Box<Any + Send + 'static>) -> ! {
- unsafe {
- imp::panic(cause)
- }
-}
-
-#[cfg(not(test))]
-/// Entry point of panic from the libcore crate.
-#[lang = "panic_fmt"]
-#[unwind]
-pub extern fn rust_begin_unwind(msg: fmt::Arguments,
- file: &'static str, line: u32) -> ! {
- begin_unwind_fmt(msg, &(file, line))
-}
-
-/// The entry point for unwinding with a formatted message.
-///
-/// This is designed to reduce the amount of code required at the call
-/// site as much as possible (so that `panic!()` has as low an impact
-/// on (e.g.) the inlining of other functions as possible), by moving
-/// the actual formatting into this shared place.
-#[unstable(feature = "libstd_sys_internals",
- reason = "used by the panic! macro",
- issue = "0")]
-#[inline(never)] #[cold]
-pub fn begin_unwind_fmt(msg: fmt::Arguments, file_line: &(&'static str, u32)) -> ! {
- use fmt::Write;
-
- // We do two allocations here, unfortunately. But (a) they're
- // required with the current scheme, and (b) we don't handle
- // panic + OOM properly anyway (see comment in begin_unwind
- // below).
-
- let mut s = String::new();
- let _ = s.write_fmt(msg);
- begin_unwind_inner(Box::new(s), file_line)
-}
-
-/// This is the entry point of unwinding for panic!() and assert!().
-#[unstable(feature = "libstd_sys_internals",
- reason = "used by the panic! macro",
- issue = "0")]
-#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
-pub fn begin_unwind<M: Any + Send>(msg: M, file_line: &(&'static str, u32)) -> ! {
- // Note that this should be the only allocation performed in this code path.
- // Currently this means that panic!() on OOM will invoke this code path,
- // but then again we're not really ready for panic on OOM anyway. If
- // we do start doing this, then we should propagate this allocation to
- // be performed in the parent of this thread instead of the thread that's
- // panicking.
-
- // see below for why we do the `Any` coercion here.
- begin_unwind_inner(Box::new(msg), file_line)
-}
-
-/// The core of the unwinding.
-///
-/// This is non-generic to avoid instantiation bloat in other crates
-/// (which makes compilation of small crates noticeably slower). (Note:
-/// we need the `Any` object anyway, we're not just creating it to
-/// avoid being generic.)
-///
-/// Doing this split took the LLVM IR line counts of `fn main() { panic!()
-/// }` from ~1900/3700 (-O/no opts) to 180/590.
-#[inline(never)] #[cold] // this is the slow path, please never inline this
-fn begin_unwind_inner(msg: Box<Any + Send>,
- file_line: &(&'static str, u32)) -> ! {
- let (file, line) = *file_line;
-
- // First, invoke the default panic handler.
- panicking::on_panic(&*msg, file, line);
-
- // Finally, perform the unwinding.
- rust_panic(msg);
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Windows SEH
-//!
-//! On Windows (currently only on MSVC), the default exception handling
-//! mechanism is Structured Exception Handling (SEH). This is quite different
-//! than Dwarf-based exception handling (e.g. what other unix platforms use) in
-//! terms of compiler internals, so LLVM is required to have a good deal of
-//! extra support for SEH.
-//!
-//! In a nutshell, what happens here is:
-//!
-//! 1. The `panic` function calls the standard Windows function `RaiseException`
-//! with a Rust-specific code, triggering the unwinding process.
-//! 2. All landing pads generated by the compiler use the personality function
-//! `__C_specific_handler` on 64-bit and `__except_handler3` on 32-bit,
-//! functions in the CRT, and the unwinding code in Windows will use this
-//! personality function to execute all cleanup code on the stack.
-//! 3. All compiler-generated calls to `invoke` have a landing pad set as a
-//! `cleanuppad` LLVM instruction, which indicates the start of the cleanup
-//! routine. The personality (in step 2, defined in the CRT) is responsible
-//! for running the cleanup routines.
-//! 4. Eventually the "catch" code in the `try` intrinsic (generated by the
-//! compiler) is executed, which will ensure that the exception being caught
-//! is indeed a Rust exception, indicating that control should come back to
-//! Rust. This is done via a `catchswitch` plus a `catchpad` instruction in
-//! LLVM IR terms, finally returning normal control to the program with a
-//! `catchret` instruction. The `try` intrinsic uses a filter function to
-//! detect what kind of exception is being thrown, and this detection is
-//! implemented as the msvc_try_filter language item below.
-//!
-//! Some specific differences from the gcc-based exception handling are:
-//!
-//! * Rust has no custom personality function, it is instead *always*
-//! __C_specific_handler or __except_handler3, so the filtering is done in a
-//! C++-like manner instead of in the personality function itself. Note that
-//! the precise codegen for this was lifted from an LLVM test case for SEH
-//! (this is the `__rust_try_filter` function below).
-//! * We've got some data to transmit across the unwinding boundary,
-//! specifically a `Box<Any + Send + 'static>`. Like with Dwarf exceptions
-//! these two pointers are stored as a payload in the exception itself. On
-//! MSVC, however, there's no need for an extra allocation because the call
-//! stack is preserved while filter functions are being executed. This means
-//! that the pointers are passed directly to `RaiseException` which are then
-//! recovered in the filter function to be written to the stack frame of the
-//! `try` intrinsic.
-//!
-//! [win64]: http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
-//! [llvm]: http://llvm.org/docs/ExceptionHandling.html#background-on-windows-exceptions
-
-use sys::c;
-
-// A code which indicates panics that originate from Rust. Note that some of the
-// upper bits are used by the system so we just set them to 0 and ignore them.
-// 0x 0 R S T
-const RUST_PANIC: c::DWORD = 0x00525354;
-
-pub use self::imp::*;
-
-mod imp {
- use prelude::v1::*;
-
- use any::Any;
- use mem;
- use raw;
- use super::RUST_PANIC;
- use sys::c;
-
- pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
- // As mentioned above, the call stack here is preserved while the filter
- // functions are running, so it's ok to pass stack-local arrays into
- // `RaiseException`.
- //
- // The two pointers of the `data` trait object are written to the stack,
- // passed to `RaiseException`, and they're later extracted by the filter
- // function below in the "custom exception information" section of the
- // `EXCEPTION_RECORD` type.
- let ptrs = mem::transmute::<_, raw::TraitObject>(data);
- let ptrs = [ptrs.data, ptrs.vtable];
- c::RaiseException(RUST_PANIC, 0, 2, ptrs.as_ptr() as *mut _);
- rtabort!("could not unwind stack");
- }
-
- pub fn payload() -> [usize; 2] {
- [0; 2]
- }
-
- pub unsafe fn cleanup(payload: [usize; 2]) -> Box<Any + Send + 'static> {
- mem::transmute(raw::TraitObject {
- data: payload[0] as *mut _,
- vtable: payload[1] as *mut _,
- })
- }
-
- // This is quite a special function, and it's not literally passed in as the
- // filter function for the `catchpad` of the `try` intrinsic. The compiler
- // actually generates its own filter function wrapper which will delegate to
- // this for the actual execution logic for whether the exception should be
- // caught. The reasons for this are:
- //
- // * Each architecture has a slightly different ABI for the filter function
- // here. For example on x86 there are no arguments but on x86_64 there are
- // two.
- // * This function needs access to the stack frame of the `try` intrinsic
- // which is using this filter as a catch pad. This is because the payload
- // of this exception, `Box<Any>`, needs to be transmitted to that
- // location.
- //
- // Both of these differences end up using a ton of weird llvm-specific
- // intrinsics, so it's actually pretty difficult to express the entire
- // filter function in Rust itself. As a compromise, the compiler takes care
- // of all the weird LLVM-specific and platform-specific stuff, getting to
- // the point where this function makes the actual decision about what to
- // catch given two parameters.
- //
- // The first parameter is `*mut EXCEPTION_POINTERS` which is some contextual
- // information about the exception being filtered, and the second pointer is
- // `*mut *mut [usize; 2]` (the payload here). This value points directly
- // into the stack frame of the `try` intrinsic itself, and we use it to copy
- // information from the exception onto the stack.
- #[lang = "msvc_try_filter"]
- #[cfg(not(test))]
- unsafe extern fn __rust_try_filter(eh_ptrs: *mut u8,
- payload: *mut u8) -> i32 {
- let eh_ptrs = eh_ptrs as *mut c::EXCEPTION_POINTERS;
- let payload = payload as *mut *mut [usize; 2];
- let record = &*(*eh_ptrs).ExceptionRecord;
- if record.ExceptionCode != RUST_PANIC {
- return 0
- }
- (**payload)[0] = record.ExceptionInformation[0] as usize;
- (**payload)[1] = record.ExceptionInformation[1] as usize;
- return 1
- }
-}
-
-// This is required by the compiler to exist (e.g. it's a lang item), but
-// it's never actually called by the compiler because __C_specific_handler
-// or _except_handler3 is the personality function that is always used.
-// Hence this is just an aborting stub.
-#[lang = "eh_personality"]
-#[cfg(not(test))]
-fn rust_eh_personality() {
- unsafe { ::intrinsics::abort() }
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Unwinding implementation of top of native Win64 SEH,
-//! however the unwind handler data (aka LSDA) uses GCC-compatible encoding.
-
-#![allow(bad_style)]
-#![allow(private_no_mangle_fns)]
-
-use prelude::v1::*;
-
-use any::Any;
-use sys_common::dwarf::eh;
-use core::mem;
-use core::ptr;
-use sys::c;
-
-// Define our exception codes:
-// according to http://msdn.microsoft.com/en-us/library/het71c37(v=VS.80).aspx,
-// [31:30] = 3 (error), 2 (warning), 1 (info), 0 (success)
-// [29] = 1 (user-defined)
-// [28] = 0 (reserved)
-// we define bits:
-// [24:27] = type
-// [0:23] = magic
-const ETYPE: c::DWORD = 0b1110_u32 << 28;
-const MAGIC: c::DWORD = 0x525354; // "RST"
-
-const RUST_PANIC: c::DWORD = ETYPE | (1 << 24) | MAGIC;
-
-#[repr(C)]
-struct PanicData {
- data: Box<Any + Send + 'static>
-}
-
-pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
- let panic_ctx = Box::new(PanicData { data: data });
- let params = [Box::into_raw(panic_ctx) as c::ULONG_PTR];
- c::RaiseException(RUST_PANIC,
- c::EXCEPTION_NONCONTINUABLE,
- params.len() as c::DWORD,
- ¶ms as *const c::ULONG_PTR);
- rtabort!("could not unwind stack");
-}
-
-pub fn payload() -> *mut u8 {
- 0 as *mut u8
-}
-
-pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send + 'static> {
- let panic_ctx = Box::from_raw(ptr as *mut PanicData);
- return panic_ctx.data;
-}
-
-// SEH doesn't support resuming unwinds after calling a landing pad like
-// libunwind does. For this reason, MSVC compiler outlines landing pads into
-// separate functions that can be called directly from the personality function
-// but are nevertheless able to find and modify stack frame of the "parent"
-// function.
-//
-// Since this cannot be done with libdwarf-style landing pads,
-// rust_eh_personality instead catches RUST_PANICs, runs the landing pad, then
-// reraises the exception.
-//
-// Note that it makes certain assumptions about the exception:
-//
-// 1. That RUST_PANIC is non-continuable, so no lower stack frame may choose to
-// resume execution.
-// 2. That the first parameter of the exception is a pointer to an extra data
-// area (PanicData).
-// Since these assumptions do not generally hold true for foreign exceptions
-// (system faults, C++ exceptions, etc), we make no attempt to invoke our
-// landing pads (and, thus, destructors!) for anything other than RUST_PANICs.
-// This is considered acceptable, because the behavior of throwing exceptions
-// through a C ABI boundary is undefined.
-
-#[lang = "eh_personality_catch"]
-#[cfg(not(test))]
-unsafe extern fn rust_eh_personality_catch(
- exceptionRecord: *mut c::EXCEPTION_RECORD,
- establisherFrame: c::LPVOID,
- contextRecord: *mut c::CONTEXT,
- dispatcherContext: *mut c::DISPATCHER_CONTEXT
-) -> c::EXCEPTION_DISPOSITION
-{
- rust_eh_personality(exceptionRecord, establisherFrame,
- contextRecord, dispatcherContext)
-}
-
-#[lang = "eh_personality"]
-#[cfg(not(test))]
-unsafe extern fn rust_eh_personality(
- exceptionRecord: *mut c::EXCEPTION_RECORD,
- establisherFrame: c::LPVOID,
- contextRecord: *mut c::CONTEXT,
- dispatcherContext: *mut c::DISPATCHER_CONTEXT
-) -> c::EXCEPTION_DISPOSITION
-{
- let er = &*exceptionRecord;
- let dc = &*dispatcherContext;
-
- if er.ExceptionFlags & c::EXCEPTION_UNWIND == 0 { // we are in the dispatch phase
- if er.ExceptionCode == RUST_PANIC {
- if let Some(lpad) = find_landing_pad(dc) {
- c::RtlUnwindEx(establisherFrame,
- lpad as c::LPVOID,
- exceptionRecord,
- er.ExceptionInformation[0] as c::LPVOID, // pointer to PanicData
- contextRecord,
- dc.HistoryTable);
- rtabort!("could not unwind");
- }
- }
- }
- c::ExceptionContinueSearch
-}
-
-#[cfg(not(test))]
-#[lang = "eh_unwind_resume"]
-#[unwind]
-unsafe extern fn rust_eh_unwind_resume(panic_ctx: c::LPVOID) -> ! {
- let params = [panic_ctx as c::ULONG_PTR];
- c::RaiseException(RUST_PANIC,
- c::EXCEPTION_NONCONTINUABLE,
- params.len() as c::DWORD,
- ¶ms as *const c::ULONG_PTR);
- rtabort!("could not resume unwind");
-}
-
-unsafe fn find_landing_pad(dc: &c::DISPATCHER_CONTEXT) -> Option<usize> {
- let eh_ctx = eh::EHContext {
- ip: dc.ControlPc as usize,
- func_start: dc.ImageBase as usize + (*dc.FunctionEntry).BeginAddress as usize,
- text_start: dc.ImageBase as usize,
- data_start: 0
- };
- eh::find_landing_pad(dc.HandlerData, &eh_ctx)
-}
use sync::StaticMutex;
use super::super::printing::print;
+use unwind as uw;
#[inline(never)] // if we know this is a function call, we can skip it when
// tracing
uw::_URC_NO_REASON
}
}
-
-/// Unwind library interface used for backtraces
-///
-/// Note that dead code is allowed as here are just bindings
-/// iOS doesn't use all of them it but adding more
-/// platform-specific configs pollutes the code too much
-#[allow(non_camel_case_types)]
-#[allow(non_snake_case)]
-mod uw {
- pub use self::_Unwind_Reason_Code::*;
-
- use libc;
-
- #[repr(C)]
- pub enum _Unwind_Reason_Code {
- _URC_NO_REASON = 0,
- _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
- _URC_FATAL_PHASE2_ERROR = 2,
- _URC_FATAL_PHASE1_ERROR = 3,
- _URC_NORMAL_STOP = 4,
- _URC_END_OF_STACK = 5,
- _URC_HANDLER_FOUND = 6,
- _URC_INSTALL_CONTEXT = 7,
- _URC_CONTINUE_UNWIND = 8,
- _URC_FAILURE = 9, // used only by ARM EABI
- }
-
- pub enum _Unwind_Context {}
-
- pub type _Unwind_Trace_Fn =
- extern fn(ctx: *mut _Unwind_Context,
- arg: *mut libc::c_void) -> _Unwind_Reason_Code;
-
- extern {
- // No native _Unwind_Backtrace on iOS
- #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
- pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
- trace_argument: *mut libc::c_void)
- -> _Unwind_Reason_Code;
-
- // available since GCC 4.2.0, should be fine for our purpose
- #[cfg(all(not(all(target_os = "android", target_arch = "arm")),
- not(all(target_os = "linux", target_arch = "arm"))))]
- pub fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
- ip_before_insn: *mut libc::c_int)
- -> libc::uintptr_t;
-
- #[cfg(all(not(target_os = "android"),
- not(all(target_os = "linux", target_arch = "arm"))))]
- pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
- -> *mut libc::c_void;
- }
-
- // On android, the function _Unwind_GetIP is a macro, and this is the
- // expansion of the macro. This is all copy/pasted directly from the
- // header file with the definition of _Unwind_GetIP.
- #[cfg(any(all(target_os = "android", target_arch = "arm"),
- all(target_os = "linux", target_arch = "arm")))]
- pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
- #[repr(C)]
- enum _Unwind_VRS_Result {
- _UVRSR_OK = 0,
- _UVRSR_NOT_IMPLEMENTED = 1,
- _UVRSR_FAILED = 2,
- }
- #[repr(C)]
- enum _Unwind_VRS_RegClass {
- _UVRSC_CORE = 0,
- _UVRSC_VFP = 1,
- _UVRSC_FPA = 2,
- _UVRSC_WMMXD = 3,
- _UVRSC_WMMXC = 4,
- }
- #[repr(C)]
- enum _Unwind_VRS_DataRepresentation {
- _UVRSD_UINT32 = 0,
- _UVRSD_VFPX = 1,
- _UVRSD_FPAX = 2,
- _UVRSD_UINT64 = 3,
- _UVRSD_FLOAT = 4,
- _UVRSD_DOUBLE = 5,
- }
-
- type _Unwind_Word = libc::c_uint;
- extern {
- fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
- klass: _Unwind_VRS_RegClass,
- word: _Unwind_Word,
- repr: _Unwind_VRS_DataRepresentation,
- data: *mut libc::c_void)
- -> _Unwind_VRS_Result;
- }
-
- let mut val: _Unwind_Word = 0;
- let ptr = &mut val as *mut _Unwind_Word;
- let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
- _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
- ptr as *mut libc::c_void);
- (val & !1) as libc::uintptr_t
- }
-
- // This function doesn't exist on Android or ARM/Linux, so make it same
- // to _Unwind_GetIP
- #[cfg(any(all(target_os = "android", target_arch = "arm"),
- all(target_os = "linux", target_arch = "arm")))]
- pub unsafe fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
- ip_before_insn: *mut libc::c_int)
- -> libc::uintptr_t
- {
- *ip_before_insn = 0;
- _Unwind_GetIP(ctx)
- }
-
- // This function also doesn't exist on Android or ARM/Linux, so make it
- // a no-op
- #[cfg(any(target_os = "android",
- all(target_os = "linux", target_arch = "arm")))]
- pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
- -> *mut libc::c_void
- {
- pc
- }
-}
/// Unix-specific extensions to `std::process::ExitStatus`
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ExitStatusExt {
+ /// Creates a new `ExitStatus` from the raw underlying `i32` return value of
+ /// a process.
+ #[unstable(feature = "exit_status_from", issue = "32713")]
+ fn from_raw(raw: i32) -> Self;
+
/// If the process was terminated by a signal, returns that signal.
#[stable(feature = "rust1", since = "1.0.0")]
fn signal(&self) -> Option<i32>;
#[stable(feature = "rust1", since = "1.0.0")]
impl ExitStatusExt for process::ExitStatus {
+ fn from_raw(raw: i32) -> Self {
+ process::ExitStatus::from_inner(From::from(raw))
+ }
+
fn signal(&self) -> Option<i32> {
self.as_inner().signal()
}
}
}
+impl From<c_int> for ExitStatus {
+ fn from(a: c_int) -> ExitStatus {
+ ExitStatus(a)
+ }
+}
+
impl fmt::Display for ExitStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(code) = self.code() {
pub const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
pub const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_NONCONTINUABLE: DWORD = 0x1; // Noncontinuable exception
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_UNWINDING: DWORD = 0x2; // Unwind is in progress
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_EXIT_UNWIND: DWORD = 0x4; // Exit unwind is in progress
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_TARGET_UNWIND: DWORD = 0x20; // Target unwind in progress
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_COLLIDED_UNWIND: DWORD = 0x40; // Collided exception handler call
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub const EXCEPTION_UNWIND: DWORD = EXCEPTION_UNWINDING |
- EXCEPTION_EXIT_UNWIND |
- EXCEPTION_TARGET_UNWIND |
- EXCEPTION_COLLIDED_UNWIND;
pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001;
pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000;
pub s6_addr: [u8; 16],
}
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub enum UNWIND_HISTORY_TABLE {}
-
-#[repr(C)]
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub struct RUNTIME_FUNCTION {
- pub BeginAddress: DWORD,
- pub EndAddress: DWORD,
- pub UnwindData: DWORD,
-}
-
-#[repr(C)]
-#[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
-pub struct DISPATCHER_CONTEXT {
- pub ControlPc: LPVOID,
- pub ImageBase: LPVOID,
- pub FunctionEntry: *const RUNTIME_FUNCTION,
- pub EstablisherFrame: LPVOID,
- pub TargetIp: LPVOID,
- pub ContextRecord: *const CONTEXT,
- pub LanguageHandler: LPVOID,
- pub HandlerData: *const u8,
- pub HistoryTable: *const UNWIND_HISTORY_TABLE,
-}
-
#[repr(C)]
#[derive(Copy, Clone)]
#[allow(dead_code)] // we only use some variants
pbBuffer: *mut BYTE) -> BOOL;
pub fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) -> BOOL;
- #[unwind]
- #[cfg(any(target_arch = "x86_64", target_env = "msvc"))]
- pub fn RaiseException(dwExceptionCode: DWORD,
- dwExceptionFlags: DWORD,
- nNumberOfArguments: DWORD,
- lpArguments: *const ULONG_PTR);
- #[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
- pub fn RtlUnwindEx(TargetFrame: LPVOID,
- TargetIp: LPVOID,
- ExceptionRecord: *const EXCEPTION_RECORD,
- ReturnValue: LPVOID,
- OriginalContext: *const CONTEXT,
- HistoryTable: *const UNWIND_HISTORY_TABLE);
pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME);
pub fn CreateEventW(lpEventAttributes: LPSECURITY_ATTRIBUTES,
self.into_inner().into_handle().into_raw() as *mut _
}
}
+
+/// Windows-specific extensions to `std::process::ExitStatus`
+#[unstable(feature = "exit_status_from", issue = "32713")]
+pub trait ExitStatusExt {
+ /// Creates a new `ExitStatus` from the raw underlying `u32` return value of
+ /// a process.
+ fn from_raw(raw: u32) -> Self;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ExitStatusExt for process::ExitStatus {
+ fn from_raw(raw: u32) -> Self {
+ process::ExitStatus::from_inner(From::from(raw))
+ }
+}
}
}
+impl From<c::DWORD> for ExitStatus {
+ fn from(u: c::DWORD) -> ExitStatus {
+ ExitStatus(u)
+ }
+}
+
impl fmt::Display for ExitStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "exit code: {}", self.0)
use any::Any;
use cell::UnsafeCell;
+use ffi::{CStr, CString};
use fmt;
use io;
+use panic;
+use panicking;
use str;
-use ffi::{CStr, CString};
use sync::{Mutex, Condvar, Arc};
use sys::thread as imp;
use sys_common::thread_info;
-use sys_common::unwind;
use sys_common::util;
use sys_common::{AsInner, IntoInner};
use time::Duration;
}
unsafe {
thread_info::set(imp::guard::current(), their_thread);
- let mut output = None;
- let try_result = {
- let ptr = &mut output;
- unwind::try(move || *ptr = Some(f()))
- };
- *their_packet.get() = Some(try_result.map(|()| {
- output.unwrap()
- }));
+ let try_result = panic::catch_unwind(panic::AssertUnwindSafe(f));
+ *their_packet.get() = Some(try_result);
}
};
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn panicking() -> bool {
- unwind::panicking()
+ panicking::panicking()
}
/// Puts the current thread to sleep for the specified amount of time.
}
}
+impl Pat {
+ pub fn walk<F>(&self, it: &mut F) -> bool
+ where F: FnMut(&Pat) -> bool
+ {
+ if !it(self) {
+ return false;
+ }
+
+ match self.node {
+ PatKind::Ident(_, _, Some(ref p)) => p.walk(it),
+ PatKind::Struct(_, ref fields, _) => {
+ fields.iter().all(|field| field.node.pat.walk(it))
+ }
+ PatKind::TupleStruct(_, Some(ref s)) | PatKind::Tup(ref s) => {
+ s.iter().all(|p| p.walk(it))
+ }
+ PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
+ s.walk(it)
+ }
+ PatKind::Vec(ref before, ref slice, ref after) => {
+ before.iter().all(|p| p.walk(it)) &&
+ slice.iter().all(|p| p.walk(it)) &&
+ after.iter().all(|p| p.walk(it))
+ }
+ PatKind::Wild |
+ PatKind::Lit(_) |
+ PatKind::Range(_, _) |
+ PatKind::Ident(_, _, _) |
+ PatKind::TupleStruct(..) |
+ PatKind::Path(..) |
+ PatKind::QPath(_, _) |
+ PatKind::Mac(_) => {
+ true
+ }
+ }
+ }
+}
+
/// A single field in a struct pattern
///
/// Patterns like the fields of Foo `{ x, ref y, ref mut z }`
let expr_file_line_ptr = self.expr_addr_of(span, expr_file_line_tuple);
self.expr_call_global(
span,
- self.std_path(&["rt", "begin_unwind"]),
+ self.std_path(&["rt", "begin_panic"]),
vec!(
self.expr_str(span, msg),
expr_file_line_ptr))
(active, simd_ffi, "1.0.0", Some(27731)),
(active, start, "1.0.0", Some(29633)),
(active, structural_match, "1.8.0", Some(31434)),
+ (active, panic_runtime, "1.10.0", Some(32837)),
+ (active, needs_panic_runtime, "1.10.0", Some(32837)),
// OIBIT specific features
(active, optin_builtin_traits, "1.0.0", Some(13231)),
attribute is an experimental \
feature",
cfg_fn!(needs_allocator))),
+ ("panic_runtime", Whitelisted, Gated("panic_runtime",
+ "the `#[panic_runtime]` attribute is \
+ an experimental feature",
+ cfg_fn!(panic_runtime))),
+ ("needs_panic_runtime", Whitelisted, Gated("needs_panic_runtime",
+ "the `#[needs_panic_runtime]` \
+ attribute is an experimental \
+ feature",
+ cfg_fn!(needs_panic_runtime))),
("rustc_variance", Normal, Gated("rustc_attrs",
"the `#[rustc_variance]` attribute \
is just used for rustc unit tests \
assert!(res.is_ok(),
"Op {} failed with 1 stack entry: {}",
cap,
- res.err().unwrap());
+ res.unwrap_err());
}
let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"];
for &cap in caps.iter() {
assert!(res.is_ok(),
"Binop {} failed with 2 stack entries: {}",
cap,
- res.err().unwrap());
+ res.unwrap_err());
}
}
for &(op, bs) in v.iter() {
let s = format!("%{{1}}%{{2}}%{}%d", op);
let res = expand(s.as_bytes(), &[], &mut Variables::new());
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), vec![b'0' + bs[0]]);
let s = format!("%{{1}}%{{1}}%{}%d", op);
let res = expand(s.as_bytes(), &[], &mut Variables::new());
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), vec![b'0' + bs[1]]);
let s = format!("%{{2}}%{{1}}%{}%d", op);
let res = expand(s.as_bytes(), &[], &mut Variables::new());
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), vec![b'0' + bs[2]]);
}
}
let mut vars = Variables::new();
let s = b"\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m";
let res = expand(s, &[Number(1)], &mut vars);
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), "\\E[31m".bytes().collect::<Vec<_>>());
let res = expand(s, &[Number(8)], &mut vars);
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), "\\E[90m".bytes().collect::<Vec<_>>());
let res = expand(s, &[Number(42)], &mut vars);
- assert!(res.is_ok(), res.err().unwrap());
+ assert!(res.is_ok(), res.unwrap_err());
assert_eq!(res.unwrap(), "\\E[38;5;42m".bytes().collect::<Vec<_>>());
}
#![feature(set_stdio)]
#![feature(staged_api)]
#![feature(question_mark)]
+#![feature(panic_unwind)]
extern crate getopts;
extern crate term;
extern crate libc;
+extern crate panic_unwind;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
--- /dev/null
+[package]
+authors = ["The Rust Project Developers"]
+name = "unwind"
+version = "0.0.0"
+build = "build.rs"
+
+[lib]
+name = "unwind"
+path = "lib.rs"
+
+[dependencies]
+core = { path = "../libcore" }
+libc = { path = "../rustc/libc_shim" }
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::env;
+
+fn main() {
+ println!("cargo:rustc-cfg=cargobuild");
+
+ let target = env::var("TARGET").unwrap();
+
+ if target.contains("linux") {
+ if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
+ println!("cargo:rustc-link-lib=static=unwind");
+ } else if !target.contains("android") {
+ println!("cargo:rustc-link-lib=gcc_s");
+ }
+ } else if target.contains("freebsd") {
+ println!("cargo:rustc-link-lib=gcc_s");
+ } else if target.contains("rumprun") {
+ println!("cargo:rustc-link-lib=unwind");
+ } else if target.contains("netbsd") {
+ println!("cargo:rustc-link-lib=gcc_s");
+ } else if target.contains("openbsd") {
+ println!("cargo:rustc-link-lib=gcc");
+ } else if target.contains("bitrig") {
+ println!("cargo:rustc-link-lib=c++abi");
+ } else if target.contains("dragonfly") {
+ println!("cargo:rustc-link-lib=gcc_pic");
+ } else if target.contains("windows-gnu") {
+ println!("cargo:rustc-link-lib=gcc_eh");
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![no_std]
+#![crate_name = "unwind"]
+#![crate_type = "rlib"]
+#![unstable(feature = "panic_unwind", issue = "32837")]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(cfg_target_vendor)]
+#![feature(staged_api)]
+#![feature(unwind_attributes)]
+
+#![cfg_attr(not(target_env = "msvc"), feature(libc))]
+
+#[cfg(not(target_env = "msvc"))]
+extern crate libc;
+
+#[cfg(not(target_env = "msvc"))]
+mod libunwind;
+#[cfg(not(target_env = "msvc"))]
+pub use libunwind::*;
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(bad_style)]
+
+use libc;
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+pub use self::_Unwind_Action::*;
+#[cfg(target_arch = "arm")]
+pub use self::_Unwind_State::*;
+pub use self::_Unwind_Reason_Code::*;
+
+#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub enum _Unwind_Action {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16,
+}
+
+#[cfg(target_arch = "arm")]
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub enum _Unwind_State {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16
+}
+
+#[repr(C)]
+pub enum _Unwind_Reason_Code {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9, // used only by ARM EABI
+}
+
+pub type _Unwind_Exception_Class = u64;
+
+pub type _Unwind_Word = libc::uintptr_t;
+
+pub type _Unwind_Trace_Fn =
+ extern fn(ctx: *mut _Unwind_Context,
+ arg: *mut libc::c_void) -> _Unwind_Reason_Code;
+
+#[cfg(target_arch = "x86")]
+pub const unwinder_private_data_size: usize = 5;
+
+#[cfg(target_arch = "x86_64")]
+pub const unwinder_private_data_size: usize = 6;
+
+#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
+pub const unwinder_private_data_size: usize = 20;
+
+#[cfg(all(target_arch = "arm", target_os = "ios"))]
+pub const unwinder_private_data_size: usize = 5;
+
+#[cfg(target_arch = "aarch64")]
+pub const unwinder_private_data_size: usize = 2;
+
+#[cfg(target_arch = "mips")]
+pub const unwinder_private_data_size: usize = 2;
+
+#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
+pub const unwinder_private_data_size: usize = 2;
+
+#[cfg(target_arch = "asmjs")]
+// FIXME: Copied from arm. Need to confirm.
+pub const unwinder_private_data_size: usize = 20;
+
+#[repr(C)]
+pub struct _Unwind_Exception {
+ pub exception_class: _Unwind_Exception_Class,
+ pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
+ pub private: [_Unwind_Word; unwinder_private_data_size],
+}
+
+pub enum _Unwind_Context {}
+
+pub type _Unwind_Exception_Cleanup_Fn =
+ extern "C" fn(unwind_code: _Unwind_Reason_Code,
+ exception: *mut _Unwind_Exception);
+
+#[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
+ target_os = "freebsd",
+ target_os = "solaris",
+ all(target_os = "linux",
+ target_env = "musl",
+ not(target_arch = "x86"),
+ not(target_arch = "x86_64"))),
+ link(name = "gcc_s"))]
+#[cfg_attr(all(target_os = "linux",
+ target_env = "musl",
+ any(target_arch = "x86", target_arch = "x86_64"),
+ not(test)),
+ link(name = "unwind", kind = "static"))]
+#[cfg_attr(any(target_os = "android", target_os = "openbsd"),
+ link(name = "gcc"))]
+#[cfg_attr(all(target_os = "netbsd", not(target_vendor = "rumprun")),
+ link(name = "gcc"))]
+#[cfg_attr(all(target_os = "netbsd", target_vendor = "rumprun"),
+ link(name = "unwind"))]
+#[cfg_attr(target_os = "dragonfly",
+ link(name = "gcc_pic"))]
+#[cfg_attr(target_os = "bitrig",
+ link(name = "c++abi"))]
+#[cfg_attr(all(target_os = "windows", target_env = "gnu"),
+ link(name = "gcc_eh"))]
+#[cfg(not(cargobuild))]
+extern {}
+
+extern {
+    // iOS on armv7 uses SjLj exceptions and requires linking
+    // against the corresponding routine (..._SjLj_...)
+ #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+ #[unwind]
+ pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception)
+ -> _Unwind_Reason_Code;
+
+ #[cfg(all(target_os = "ios", target_arch = "arm"))]
+ #[unwind]
+ fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception)
+ -> _Unwind_Reason_Code;
+
+ pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
+
+ #[unwind]
+ pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
+
+ // No native _Unwind_Backtrace on iOS
+ #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
+ pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
+ trace_argument: *mut libc::c_void)
+ -> _Unwind_Reason_Code;
+
+ // available since GCC 4.2.0, should be fine for our purpose
+ #[cfg(all(not(all(target_os = "android", target_arch = "arm")),
+ not(all(target_os = "linux", target_arch = "arm"))))]
+ pub fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
+ ip_before_insn: *mut libc::c_int)
+ -> libc::uintptr_t;
+
+ #[cfg(all(not(target_os = "android"),
+ not(all(target_os = "linux", target_arch = "arm"))))]
+ pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+ -> *mut libc::c_void;
+}
+
+// ... and now we're just providing access to the SjLj counterpart
+// through a standard name to hide those details from others
+// (see also comment above regarding _Unwind_RaiseException)
+#[cfg(all(target_os = "ios", target_arch = "arm"))]
+#[inline]
+pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception)
+ -> _Unwind_Reason_Code {
+ _Unwind_SjLj_RaiseException(exc)
+}
+
+// On android, the function _Unwind_GetIP is a macro, and this is the
+// expansion of the macro. This is all copy/pasted directly from the
+// header file with the definition of _Unwind_GetIP.
+#[cfg(any(all(target_os = "android", target_arch = "arm"),
+ all(target_os = "linux", target_arch = "arm")))]
+pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
+ #[repr(C)]
+ enum _Unwind_VRS_Result {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2,
+ }
+ #[repr(C)]
+ enum _Unwind_VRS_RegClass {
+ _UVRSC_CORE = 0,
+ _UVRSC_VFP = 1,
+ _UVRSC_FPA = 2,
+ _UVRSC_WMMXD = 3,
+ _UVRSC_WMMXC = 4,
+ }
+ #[repr(C)]
+ enum _Unwind_VRS_DataRepresentation {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_FPAX = 2,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5,
+ }
+
+ type _Unwind_Word = libc::c_uint;
+ extern {
+ fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
+ klass: _Unwind_VRS_RegClass,
+ word: _Unwind_Word,
+ repr: _Unwind_VRS_DataRepresentation,
+ data: *mut libc::c_void)
+ -> _Unwind_VRS_Result;
+ }
+
+ let mut val: _Unwind_Word = 0;
+ let ptr = &mut val as *mut _Unwind_Word;
+ let _ = _Unwind_VRS_Get(ctx, _Unwind_VRS_RegClass::_UVRSC_CORE, 15,
+ _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
+ ptr as *mut libc::c_void);
+ (val & !1) as libc::uintptr_t
+}
+
+// This function doesn't exist on Android or ARM/Linux, so make it the
+// same as _Unwind_GetIP
+#[cfg(any(all(target_os = "android", target_arch = "arm"),
+ all(target_os = "linux", target_arch = "arm")))]
+pub unsafe fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
+ ip_before_insn: *mut libc::c_int)
+ -> libc::uintptr_t
+{
+ *ip_before_insn = 0;
+ _Unwind_GetIP(ctx)
+}
+
+// This function also doesn't exist on Android or ARM/Linux, so make it
+// a no-op
+#[cfg(any(target_os = "android",
+ all(target_os = "linux", target_arch = "arm")))]
+pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void)
+ -> *mut libc::c_void
+{
+ pc
+}
-Subproject commit 751345228a0ef03fd147394bb5104359b7a808be
+Subproject commit a73c41e7f1c85cd814e9792fc6a6a8f8e31b8dd4
"std 0.0.0",
]
-[[package]]
-name = "advapi32-sys"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "alloc"
version = "0.0.0"
dependencies = [
"build_helper 0.1.0",
"core 0.0.0",
- "gcc 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.0.0",
]
[[package]]
name = "gcc"
-version = "0.3.17"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.0.0"
dependencies = [
- "advapi32-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "core 0.0.0",
]
[[package]]
-name = "libc"
+name = "panic_abort"
version = "0.0.0"
dependencies = [
"core 0.0.0",
+ "libc 0.0.0",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc 0.0.0",
+ "core 0.0.0",
+ "libc 0.0.0",
+ "unwind 0.0.0",
]
[[package]]
"build_helper 0.1.0",
"collections 0.0.0",
"core 0.0.0",
- "gcc 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.0.0",
+ "panic_abort 0.0.0",
+ "panic_unwind 0.0.0",
"rand 0.0.0",
"rustc_unicode 0.0.0",
+ "unwind 0.0.0",
]
[[package]]
-name = "winapi"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "winapi-build"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "core 0.0.0",
+ "libc 0.0.0",
+]
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2016-04-26
+2016-04-28
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C lto -C panic=abort -O
+// no-prefer-dynamic
+
+fn main() {
+ foo();
+}
+
+#[no_mangle]
+#[inline(never)]
+fn foo() {
+ let _a = Box::new(3);
+ bar();
+// CHECK-LABEL: foo
+// CHECK: call {{.*}} void @bar
+}
+
+#[inline(never)]
+#[no_mangle]
+fn bar() {
+ println!("hello!");
+}
mod submod {
pub static answer: i32 = 42;
- //~^ NOTE static variable defined here
}
use self::submod::answer;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let tup = (0, 1, 2);
+ // the case where we show a suggestion
+ let _ = tup[0];
+ //~^ ERROR cannot index a value of type
+ //~| HELP to access tuple elements, use tuple indexing syntax as shown
+ //~| SUGGESTION let _ = tup.0
+
+ // the case where we show just a general hint
+ let i = 0_usize;
+ let _ = tup[i];
+ //~^ ERROR cannot index a value of type
+ //~| HELP to access tuple elements, use tuple indexing syntax (e.g. `tuple.0`)
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// forbid-output: &mut mut self
+
+struct Struct;
+
+impl Struct {
+ fn foo(&mut self) {
+ (&mut self).bar();
+ //~^ ERROR cannot borrow immutable argument `self` as mutable
+ // ... and no SUGGESTION that suggests `&mut mut self`
+ }
+
+ // In this case we could keep the suggestion, but to distinguish the
+ // two cases is pretty hard. It's an obscure case anyway.
+ fn bar(self: &mut Self) {
+ (&mut self).bar();
+ //~^ ERROR cannot borrow immutable argument `self` as mutable
+ }
+}
+
+fn main () {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure that the spans of import errors are correct.
+
+use abc::one_el;
+//~^ ERROR 13:5: 13:16
+use abc::{a, bbb, cccccc};
+//~^ ERROR 15:11: 15:12
+//~^^ ERROR 15:14: 15:17
+//~^^^ ERROR 15:19: 15:25
+use a_very_long_name::{el, el2};
+//~^ ERROR 19:24: 19:26
+//~^^ ERROR 19:28: 19:31
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort -C prefer-dynamic
+// ignore-musl - no dylibs here
+// error-pattern:`panic_unwind` is not compiled with this crate's panic strategy
+
+// This is a test where the local crate, compiled with `panic=abort`, links to
+// the standard library **dynamically** which is already linked against
+// `panic=unwind`. We should fail because the linked panic runtime does not
+// correspond with our `-C panic` option.
+//
+// Note that this test assumes that the dynamic version of the standard library
+// is linked to `panic_unwind`, which is currently the case.
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![feature(needs_panic_runtime)]
+#![crate_type = "rlib"]
+#![needs_panic_runtime]
+#![no_std]
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort
+// no-prefer-dynamic
+
+#![feature(panic_runtime)]
+#![crate_type = "rlib"]
+
+#![no_std]
+#![panic_runtime]
+
+#[no_mangle]
+pub extern fn __rust_maybe_catch_panic() {}
+
+#[no_mangle]
+pub extern fn __rust_start_panic() {}
+
+#[no_mangle]
+pub extern fn rust_eh_personality() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+
+#![no_std]
+#![feature(lang_items)]
+
+#[lang = "panic_fmt"]
+fn panic_fmt() {}
+#[lang = "eh_personality"]
+fn eh_personality() {}
+#[lang = "eh_unwind_resume"]
+fn eh_unwind_resume() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=unwind
+// no-prefer-dynamic
+
+#![feature(panic_runtime)]
+#![crate_type = "rlib"]
+
+#![no_std]
+#![panic_runtime]
+
+#[no_mangle]
+pub extern fn __rust_maybe_catch_panic() {}
+
+#[no_mangle]
+pub extern fn __rust_start_panic() {}
+
+#[no_mangle]
+pub extern fn rust_eh_personality() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=unwind
+// no-prefer-dynamic
+
+#![feature(panic_runtime)]
+#![crate_type = "rlib"]
+
+#![no_std]
+#![panic_runtime]
+
+#[no_mangle]
+pub extern fn __rust_maybe_catch_panic() {}
+
+#[no_mangle]
+pub extern fn __rust_start_panic() {}
+
+#[no_mangle]
+pub extern fn rust_eh_personality() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![feature(panic_runtime)]
+#![crate_type = "rlib"]
+#![panic_runtime]
+#![no_std]
+
+extern crate needs_panic_runtime;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+#![no_std]
+
+extern crate panic_runtime_abort;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+#![no_std]
+
+extern crate panic_runtime_unwind;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=foo
+// error-pattern:either `panic` or `abort` was expected
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic
+// error-pattern:requires either `panic` or `abort`
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:is not compiled with this crate's panic strategy `abort`
+// compile-flags:-C panic=abort
+
+#![feature(test)]
+
+extern crate test;
+
+fn main() {
+}
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![panic_runtime] //~ ERROR: is an experimental feature
+#![needs_panic_runtime] //~ ERROR: is an experimental feature
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:needs-panic-runtime.rs
+// aux-build:runtime-depending-on-panic-runtime.rs
+// error-pattern:cannot depend on a crate that needs a panic runtime
+
+extern crate runtime_depending_on_panic_runtime;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:panic-runtime-unwind.rs
+// aux-build:panic-runtime-abort.rs
+// aux-build:wants-panic-runtime-unwind.rs
+// aux-build:wants-panic-runtime-abort.rs
+// aux-build:panic-runtime-lang-items.rs
+// error-pattern: is not compiled with this crate's panic strategy `unwind`
+
+#![no_std]
+
+extern crate wants_panic_runtime_unwind;
+extern crate wants_panic_runtime_abort;
+extern crate panic_runtime_lang_items;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:cannot link together two panic runtimes: panic_runtime_unwind and panic_runtime_unwind2
+// ignore-tidy-linelength
+// aux-build:panic-runtime-unwind.rs
+// aux-build:panic-runtime-unwind2.rs
+// aux-build:panic-runtime-lang-items.rs
+
+#![no_std]
+
+extern crate panic_runtime_unwind;
+extern crate panic_runtime_unwind2;
+extern crate panic_runtime_lang_items;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:is not compiled with this crate's panic strategy `abort`
+// aux-build:panic-runtime-unwind.rs
+// compile-flags:-C panic=abort
+
+extern crate panic_runtime_unwind;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:is not compiled with this crate's panic strategy `abort`
+// aux-build:panic-runtime-unwind.rs
+// aux-build:wants-panic-runtime-unwind.rs
+// compile-flags:-C panic=abort
+
+extern crate wants_panic_runtime_unwind;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:is incompatible with this crate's strategy of `unwind`
+// aux-build:panic-runtime-abort.rs
+// aux-build:panic-runtime-lang-items.rs
+
+#![no_std]
+
+extern crate panic_runtime_abort;
+extern crate panic_runtime_lang_items;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:is incompatible with this crate's strategy of `unwind`
+// aux-build:panic-runtime-abort.rs
+// aux-build:wants-panic-runtime-abort.rs
+// aux-build:panic-runtime-lang-items.rs
+
+#![no_std]
+
+extern crate wants_panic_runtime_abort;
+extern crate panic_runtime_lang_items;
+
+fn main() {}
use rustc::session::config::{self, basic_options, build_configuration, Input, Options};
use rustc::session::build_session;
use rustc_driver::{driver, abort_on_err};
-use rustc::hir::lowering::{lower_crate, LoweringContext};
use rustc_resolve::MakeGlobMap;
use rustc_metadata::creader::LocalCrateReader;
use rustc_metadata::cstore::CStore;
let krate = driver::assign_node_ids(&sess, krate);
let defs = RefCell::new(ast_map::collect_definitions(&krate));
LocalCrateReader::new(&sess, &cstore, &defs, &krate, &id).read_crates(&dep_graph);
- let lcx = LoweringContext::new(&sess, Some(&krate), &defs);
- let mut hir_forest = ast_map::Forest::new(lower_crate(&lcx, &krate), dep_graph);
+ let (analysis, resolutions, mut hir_forest) = {
+ let defs = &mut *defs.borrow_mut();
+ driver::lower_and_resolve(&sess, &id, defs, &krate, dep_graph, MakeGlobMap::No)
+ };
let arenas = ty::CtxtArenas::new();
let ast_map = ast_map::map_crate(&mut hir_forest, &defs);
abort_on_err(driver::phase_3_run_analysis_passes(
- &sess, ast_map, &arenas, &id,
- MakeGlobMap::No, |tcx, mir_map, analysis, _| {
+ &sess, ast_map, analysis, resolutions, &arenas, &id,
+ |tcx, mir_map, analysis, _| {
let trans = driver::phase_4_translate_to_llvm(tcx, mir_map.unwrap(), analysis);
fn bar /* 62#0 */() { let x /* 59#2 */ = 1; y /* 61#4 */ + x /* 59#5 */ }
+
+fn y /* 61#0 */() { }
let x = 1;
foo!(x)
}
+
+fn y() {}
thread::spawn(move|| {
let _a = A;
lib::callback(|| panic!());
- }).join().err().unwrap();
+ }).join().unwrap_err();
unsafe {
assert_eq!(lib::statik, 1);
use rustc::mir::transform::{self, MirPass, MirSource};
use rustc::mir::repr::{Mir, Literal};
use rustc::mir::visit::MutVisitor;
-use rustc::ty;
+use rustc::ty::TyCtxt;
use rustc::middle::const_val::ConstVal;
use rustc_const_math::ConstInt;
use rustc_plugin::Registry;
impl transform::Pass for Pass {}
impl<'tcx> MirPass<'tcx> for Pass {
- fn run_pass(&mut self, _: &ty::TyCtxt<'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+ fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>,
+ _: MirSource, mir: &mut Mir<'tcx>) {
Visitor.visit_mir(mir)
}
}
let output = Command::new(&me).arg("bad").before_exec(|| {
Err(Error::from_raw_os_error(102))
- }).output().err().unwrap();
+ }).output().unwrap_err();
assert_eq!(output.raw_os_error(), Some(102));
let pid = unsafe { libc::getpid() };
#![deny(const_err)]
+const X: *const u8 = b"" as _;
fn main() {
let _ = ((-1 as i8) << 8 - 1) as f32;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#[allow(unused)]
fn main() {
|| {
'label: loop {
}
};
+
+ // More cases added from issue 31754
+
+ 'label2: loop {
+ break;
+ }
+
+ let closure = || {
+ 'label2: loop {}
+ };
+
+ fn inner_fn() {
+ 'label2: loop {}
+ }
}
p
}
+#[rustc_mir]
+fn coerce_ptr_wrapper_poly<'a, T, Trait: ?Sized>(p: PtrWrapper<'a, T>)
+ -> PtrWrapper<'a, Trait>
+ where PtrWrapper<'a, T>: CoerceUnsized<PtrWrapper<'a, Trait>>
+{
+ p
+}
fn main() {
let a = [0,1,2];
let z = coerce_fat_ptr_wrapper(PtrWrapper(2,3,(),&square_local));
assert_eq!((z.3)(6), 36);
+
+ let z: PtrWrapper<Fn(u32) -> u32> =
+ coerce_ptr_wrapper_poly(PtrWrapper(2,3,(),&square_local));
+ assert_eq!((z.3)(6), 36);
}
f()
}
+#[rustc_mir]
+fn test_fn_transmute_zst(x: ()) -> [(); 1] {
+ fn id<T>(x: T) -> T {x}
+
+ id(unsafe {
+ std::mem::transmute(x)
+ })
+}
+
fn main() {
assert_eq!(test1(1, (2, 3), &[4, 5, 6]), (1, (2, 3), &[4, 5, 6][..]));
assert_eq!(test2(98), 98);
assert_eq!(test_fn_direct_call(&closure, 100, 4), 324);
assert_eq!(test_fn_nil_call(&(|| 42)), 42);
+ assert_eq!(test_fn_transmute_zst(()), [()]);
}
}
#[rustc_mir]
-fn test(a: i64, b: i64, c: i64, d: i64, e: i64, f: i64) -> i64 {
+fn test<T, U>(a: i64, b: i64, c: i64, d: i64, e: i64, f: T, g: U) -> i64 {
unsafe {
rust_interesting_average(6, a, a as f64,
b, b as f64,
c, c as f64,
d, d as f64,
e, e as f64,
- f, f as f64) as i64
+ f, g) as i64
}
}
fn main(){
- assert_eq!(test(10, 20, 30, 40, 50, 60), 70);
+ assert_eq!(test(10, 20, 30, 40, 50, 60_i64, 60.0_f64), 70);
}
thread::spawn(move|| -> () {
let _a = A;
panic!();
- }).join().err().unwrap();
+ }).join().unwrap_err();
assert!(unsafe { !HIT });
}
panic!("hi there");
});
- panic::propagate(result.err().unwrap());
+ panic::propagate(result.unwrap_err());
}).join();
- let msg = *result.err().unwrap().downcast::<&'static str>().unwrap();
+ let msg = *result.unwrap_err().downcast::<&'static str>().unwrap();
assert_eq!("hi there", msg);
assert_eq!(1, A.load(Ordering::SeqCst));
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort
+// aux-build:exit-success-if-unwind.rs
+// no-prefer-dynamic
+
+extern crate exit_success_if_unwind;
+
+use std::process::Command;
+use std::env;
+
+fn main() {
+ let mut args = env::args_os();
+ let me = args.next().unwrap();
+
+ if let Some(s) = args.next() {
+ if &*s == "foo" {
+ exit_success_if_unwind::bar(do_panic);
+ }
+ }
+ let s = Command::new(env::args_os().next().unwrap()).arg("foo").status();
+ assert!(s.unwrap().code() != Some(0));
+}
+
+fn do_panic() {
+ panic!("try to catch me");
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort
+// no-prefer-dynamic
+
+use std::process::Command;
+use std::env;
+
+struct Bomb;
+
+impl Drop for Bomb {
+ fn drop(&mut self) {
+ std::process::exit(0);
+ }
+}
+
+fn main() {
+ let mut args = env::args_os();
+ let me = args.next().unwrap();
+
+ if let Some(s) = args.next() {
+ if &*s == "foo" {
+
+ let _bomb = Bomb;
+
+ panic!("try to catch me");
+ }
+ }
+ let s = Command::new(env::args_os().next().unwrap()).arg("foo").status();
+ assert!(s.unwrap().code() != Some(0));
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "rlib"]
+
+struct Bomb;
+
+impl Drop for Bomb {
+ fn drop(&mut self) {
+ std::process::exit(0);
+ }
+}
+
+pub fn bar(f: fn()) {
+ let _bomb = Bomb;
+ f();
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C panic=abort
+// no-prefer-dynamic
+
+#![feature(panic_abort)]
+
+extern crate panic_abort;
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![feature(panic_unwind)]
+
+extern crate panic_unwind;
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C lto -C panic=abort
+// no-prefer-dynamic
+
+use std::process::Command;
+use std::env;
+
+struct Bomb;
+
+impl Drop for Bomb {
+ fn drop(&mut self) {
+ std::process::exit(0);
+ }
+}
+
+fn main() {
+ let mut args = env::args_os();
+ let me = args.next().unwrap();
+
+ if let Some(s) = args.next() {
+ if &*s == "foo" {
+
+ let _bomb = Bomb;
+
+ panic!("try to catch me");
+ }
+ }
+ let s = Command::new(env::args_os().next().unwrap()).arg("foo").status();
+ assert!(s.unwrap().code() != Some(0));
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-C lto -C panic=unwind
+// no-prefer-dynamic
+
+use std::process::Command;
+use std::env;
+
+struct Bomb;
+
+impl Drop for Bomb {
+ fn drop(&mut self) {
+ println!("hurray you ran me");
+ }
+}
+
+fn main() {
+ let mut args = env::args_os();
+ let me = args.next().unwrap();
+
+ if let Some(s) = args.next() {
+ if &*s == "foo" {
+
+ let _bomb = Bomb;
+
+ panic!("try to catch me");
+ }
+ }
+ let s = Command::new(env::args_os().next().unwrap()).arg("foo").output();
+ let s = s.unwrap();
+ assert!(!s.status.success());
+ assert!(String::from_utf8_lossy(&s.stdout).contains("hurray you ran me"));
+}
}
fn main() {
- thread::spawn(move|| { ::b::g() }).join().err().unwrap();
+ thread::spawn(move|| { ::b::g() }).join().unwrap_err();
}
fn test_panic() {
fn f() { let _x: Box<isize> = panic!(); }
- thread::spawn(move|| f() ).join().err().unwrap();
+ thread::spawn(move|| f() ).join().unwrap_err();
}
fn test_panic_indirect() {
fn f() -> ! { panic!(); }
fn g() { let _x: Box<isize> = f(); }
- thread::spawn(move|| g() ).join().err().unwrap();
+ thread::spawn(move|| g() ).join().unwrap_err();
}
pub fn main() {
let _b = Foo;
}).join();
- let s = x.err().unwrap().downcast::<&'static str>().unwrap();
+ let s = x.unwrap_err().downcast::<&'static str>().unwrap();
assert_eq!(&**s, "This panic should happen.");
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--test
+
+/// ```
+/// let a = r#"
+/// foo
+/// bar"#;
+/// let b = "\nfoo\nbar";
+/// assert_eq!(a, b);
+/// ```
+pub fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+
+// @has issue_29503/trait.MyTrait.html
+pub trait MyTrait {
+ fn my_string(&self) -> String;
+}
+
+// @has - "//ul[@id='implementors-list']/li" "impl<T> MyTrait for T where T: Debug"
+impl<T> MyTrait for T where T: fmt::Debug {
+ fn my_string(&self) -> String {
+ format!("{:?}", self)
+ }
+}
+
+pub fn main() {
+}
if krate == "alloc_jemalloc" && toml.contains("name = \"std\"") {
continue
}
+ if krate == "panic_abort" && toml.contains("name = \"std\"") {
+ continue
+ }
if !librs.contains(&format!("extern crate {}", krate)) {
println!("{} doesn't have `extern crate {}`, but Cargo.toml \