# our configure script, so disable auto submodule management.
git:
submodules: false
+ depth: 1
before_install:
- docker build -t rust -f src/etc/Dockerfile src/etc
opt inject-std-version 1 "inject the current compiler version of libstd into programs"
opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
opt rustbuild 0 "use the rust and cargo based build system"
-opt orbit 0 "get MIR where it belongs - everywhere; most importantly, in orbit"
+opt orbit 1 "get MIR where it belongs - everywhere; most importantly, in orbit"
opt codegen-tests 1 "run the src/test/codegen tests"
opt option-checking 1 "complain about unrecognized options in this configure script"
opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)"
if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi
if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi
-if [ -n "$CFG_ENABLE_ORBIT" ]; then putvar CFG_ENABLE_ORBIT; fi
+if [ -n "$CFG_DISABLE_ORBIT" ]; then putvar CFG_DISABLE_ORBIT; fi
step_msg "looking for build programs"
err "bad LLVM version: $LLVM_VERSION, need >=3.7"
;;
esac
+
+ if "$CFG_LLVM_ROOT/bin/llvm-mc" -help | grep -- "-relocation-model"; then
+ msg "found older llvm-mc"
+ CFG_LLVM_MC_HAS_RELOCATION_MODEL=1
+ putvar CFG_LLVM_MC_HAS_RELOCATION_MODEL
+ fi
fi
# Even when the user overrides the choice of CC, still try to detect
;;
- x86_64-*-musl)
+ x86_64-*-musl | arm-*-musleabi)
if [ ! -f $CFG_MUSL_ROOT/lib/libc.a ]
then
err "musl libc $CFG_MUSL_ROOT/lib/libc.a not found"
--- /dev/null
+# This file is intentionally left empty to indicate that, while this target is
+# supported, it's not supported using plain GNU Make builds. Use --rustbuild
+# instead.
\ No newline at end of file
--- /dev/null
+# This file is intentionally left empty to indicate that, while this target is
+# supported, it's not supported using plain GNU Make builds. Use --rustbuild
+# instead.
\ No newline at end of file
--- /dev/null
+# This file is intentionally left empty to indicate that, while this target is
+# supported, it's not supported using plain GNU Make builds. Use --rustbuild
+# instead.
\ No newline at end of file
CFG_RUSTC_FLAGS += -g
endif
-ifdef CFG_ENABLE_ORBIT
- $(info cfg: launching MIR (CFG_ENABLE_ORBIT))
- CFG_RUSTC_FLAGS += -Z orbit
+ifdef CFG_DISABLE_ORBIT
+ $(info cfg: HOLD HOLD HOLD (CFG_DISABLE_ORBIT))
+ CFG_RUSTC_FLAGS += -Z orbit=off
endif
ifdef SAVE_TEMPS
LLVM_MC_RELOCATION_MODEL="default"
endif
+ # LLVM changed this flag in 3.9
+ ifdef CFG_LLVM_MC_HAS_RELOCATION_MODEL
+ LLVM_MC_RELOC_FLAG := -relocation-model=$$(LLVM_MC_RELOCATION_MODEL)
+ else
+ LLVM_MC_RELOC_FLAG := -position-independent
+ endif
+
# We're using llvm-mc as our assembler because it supports
# .cfi pseudo-ops on mac
CFG_ASSEMBLE_$(1)=$$(CPP_$(1)) -E $$(2) | \
$$(LLVM_MC_$$(CFG_BUILD)) \
-assemble \
- -relocation-model=$$(LLVM_MC_RELOCATION_MODEL) \
+ $$(LLVM_MC_RELOC_FLAG) \
-filetype=obj \
-triple=$(1) \
-o=$$(1)
endif
ifeq ($$(findstring msvc,$(1)),)
+
+ifeq ($$(findstring freebsd,$(1)),)
COMPRT_OBJS_$(1) += gcc_personality_v0.o
+endif
+
COMPRT_OBJS_$(1) += emutls.o
ifeq ($$(findstring x86_64,$(1)),x86_64)
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"gcc 0.3.31 (git+https://github.com/alexcrichton/gcc-rs)",
"getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
[[package]]
name = "kernel32-sys"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "libc"
-version = "0.2.9"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
}
add_to_sysroot(&out_dir, &libdir);
- if target.contains("musl") &&
- (target.contains("x86_64") || target.contains("i686")) {
+ if target.contains("musl") && !target.contains("mips") {
copy_third_party_objects(build, target, &libdir);
}
}
// libstd features
pub debug_jemalloc: bool,
pub use_jemalloc: bool,
+ pub backtrace: bool, // support for RUST_BACKTRACE
// misc
pub channel: String,
debuginfo: Option<bool>,
debug_jemalloc: Option<bool>,
use_jemalloc: Option<bool>,
+ backtrace: Option<bool>,
default_linker: Option<String>,
default_ar: Option<String>,
channel: Option<String>,
let mut config = Config::default();
config.llvm_optimize = true;
config.use_jemalloc = true;
+ config.backtrace = true;
config.rust_optimize = true;
config.rust_optimize_tests = true;
config.submodules = true;
set(&mut config.rust_rpath, rust.rpath);
set(&mut config.debug_jemalloc, rust.debug_jemalloc);
set(&mut config.use_jemalloc, rust.use_jemalloc);
+ set(&mut config.backtrace, rust.backtrace);
set(&mut config.channel, rust.channel.clone());
config.rustc_default_linker = rust.default_linker.clone();
config.rustc_default_ar = rust.default_ar.clone();
# Whether or not jemalloc is built with its debug option set
#debug-jemalloc = false
+# Whether or not `panic!`s generate backtraces (RUST_BACKTRACE)
+#backtrace = true
+
# The default linker that will be used by the generated compiler. Note that this
# is not the linker used to link said compiler.
#default-linker = "cc"
if self.config.use_jemalloc {
features.push_str(" jemalloc");
}
+ if self.config.backtrace {
+ features.push_str(" backtrace");
+ }
return features
}
]);
}
} else {
- sources.push("gcc_personality_v0.c");
+ if !target.contains("freebsd") {
+ sources.push("gcc_personality_v0.c");
+ }
if target.contains("x86_64") {
sources.extend(vec![
pub fn check(build: &mut Build) {
let mut checked = HashSet::new();
let path = env::var_os("PATH").unwrap_or(OsString::new());
+ // On Windows, quotes are invalid characters for filename paths, and if
+ // one is present as part of the PATH then that can lead to the system
+ // being unable to identify the files properly. See
+ // https://github.com/rust-lang/rust/issues/34959 for more details.
+ if cfg!(windows) {
+ if path.to_string_lossy().contains("\"") {
+ panic!("PATH contains invalid character '\"'");
+ }
+ }
let mut need_cmd = |cmd: &OsStr| {
if !checked.insert(cmd.to_owned()) {
return
}
// Make sure musl-root is valid if specified
- if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
+ if target.contains("musl") && !target.contains("mips") {
match build.config.musl_root {
Some(ref root) => {
if fs::metadata(root.join("lib/libc.a")).is_err() {
-Subproject commit ac3d1cda612edccb6f1da53cbf7716e248405f3b
+Subproject commit 8598065bd965d9713bfafb6c1e766d63a7b17b89
three separate traits to overload with:
```rust
+# #![feature(unboxed_closures)]
# mod foo {
pub trait Fn<Args> : FnMut<Args> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output;
# some_closure(1) }
```
-Because `Fn` is a trait, we can bound our generic with it. In this case, our
-closure takes a `i32` as an argument and returns an `i32`, and so the generic
-bound we use is `Fn(i32) -> i32`.
+Because `Fn` is a trait, we can use it as a bound for our generic type. In
+this case, our closure takes a `i32` as an argument and returns an `i32`, and
+so the generic bound we use is `Fn(i32) -> i32`.
There’s one other key point here: because we’re bounding a generic with a
trait, this will get monomorphized, and therefore, we’ll be doing static
# The "nullable pointer optimization"
-Certain types are defined to not be NULL. This includes references (`&T`,
-`&mut T`), boxes (`Box<T>`), and function pointers (`extern "abi" fn()`).
-When interfacing with C, pointers that might be NULL are often used.
-As a special case, a generic `enum` that contains exactly two variants, one of
-which contains no data and the other containing a single field, is eligible
-for the "nullable pointer optimization". When such an enum is instantiated
-with one of the non-nullable types, it is represented as a single pointer,
-and the non-data variant is represented as the NULL pointer. So
-`Option<extern "C" fn(c_int) -> c_int>` is how one represents a nullable
-function pointer using the C ABI.
+Certain Rust types are defined to never be `null`. This includes references (`&T`,
+`&mut T`), boxes (`Box<T>`), and function pointers (`extern "abi" fn()`). When
+interfacing with C, pointers that might be `null` are often used, which would seem to
+require some messy `transmute`s and/or unsafe code to handle conversions to/from Rust types.
+However, the language provides a workaround.
+
+As a special case, an `enum` is eligible for the "nullable pointer optimization" if it contains
+exactly two variants, one of which contains no data and the other contains a field of one of the
+non-nullable types listed above. This means no extra space is required for a discriminant; rather,
+the empty variant is represented by putting a `null` value into the non-nullable field. This is
+called an "optimization", but unlike other optimizations it is guaranteed to apply to eligible
+types.
+
+The most common type that takes advantage of the nullable pointer optimization is `Option<T>`,
+where `None` corresponds to `null`. So `Option<extern "C" fn(c_int) -> c_int>` is a correct way
+to represent a nullable function pointer using the C ABI (corresponding to the C type
+`int (*)(int)`).
+
+Here is a contrived example. Let's say some C library has a facility for registering a
+callback, which gets called in certain situations. The callback is passed a function pointer
+and an integer and it is supposed to run the function with the integer as a parameter. So
+we have function pointers flying across the FFI boundary in both directions.
+
+```rust
+# #![feature(libc)]
+extern crate libc;
+use libc::c_int;
+
+# #[cfg(hidden)]
+extern "C" {
+ /// Register the callback.
+ fn register(cb: Option<extern "C" fn(Option<extern "C" fn(c_int) -> c_int>, c_int) -> c_int>);
+}
+# unsafe fn register(_: Option<extern "C" fn(Option<extern "C" fn(c_int) -> c_int>,
+# c_int) -> c_int>)
+# {}
+
+/// This fairly useless function receives a function pointer and an integer
+/// from C, and returns the result of calling the function with the integer.
+/// In case no function is provided, it squares the integer by default.
+extern "C" fn apply(process: Option<extern "C" fn(c_int) -> c_int>, int: c_int) -> c_int {
+ match process {
+ Some(f) => f(int),
+ None => int * int
+ }
+}
+
+fn main() {
+ unsafe {
+ register(Some(apply));
+ }
+}
+```
+
+And the code on the C side looks like this:
+
+```c
+void register(void (*f)(void (*)(int), int)) {
+ ...
+}
+```
+
+No `transmute` required!
# Calling Rust code from C
and one for deallocation. A freestanding program that uses the `Box`
sugar for dynamic allocations via `malloc` and `free`:
-```rust
+```rust,ignore
#![feature(lang_items, box_syntax, start, libc)]
#![no_std]
libedit-dev zlib1g-dev \
llvm-3.7-tools cmake
-# When we compile compiler-rt we pass it the llvm-config we just installed on
-# the system, but unfortunately it doesn't infer correctly where
-# LLVMConfig.cmake is so we need to coerce it a bit...
-RUN mkdir -p /usr/lib/llvm-3.7/build/share/llvm
-RUN ln -s /usr/share/llvm-3.7/cmake /usr/lib/llvm-3.7/build/share/llvm/cmake
-
RUN mkdir /build
WORKDIR /build
cp ${PREFIX}/bin/rustc${BIN_SUF} ${TARG_DIR}/stage0/bin/
cp ${PREFIX}/${LIB_DIR}/${RUSTLIBDIR}/${TARG_DIR}/${LIB_DIR}/* ${TARG_DIR}/stage0/${LIB_DIR}/
+cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}arena*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/
cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}extra*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/
cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}rust*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/
cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}std*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/
lib = lib.strip()[2:]
elif lib[0] == '-':
lib = lib.strip()[1:]
+ # If this actually points at a literal file then we're on MSVC which now
+ # prints full paths, so get just the name of the library and strip off the
+ # trailing ".lib"
+ elif os.path.exists(lib):
+ lib = os.path.basename(lib)[:-4]
+ elif lib[-4:] == '.lib':
+ lib = lib[:-4]
f.write("#[link(name = \"" + lib + "\"")
if not llvm_shared and 'LLVM' in lib:
f.write(", kind = \"static\"")
[build-dependencies]
build_helper = { path = "../build_helper" }
-gcc = "0.3.17"
+gcc = "0.3.27"
[features]
debug = []
.replace("\\", "/"))
.current_dir(&build_dir)
.env("CC", compiler.path())
- .env("EXTRA_CFLAGS", cflags)
+ .env("EXTRA_CFLAGS", cflags.clone())
+ // jemalloc generates Makefile deps using GCC's "-MM" flag. This means
+ // that GCC will run the preprocessor, and only the preprocessor, over
+ // jemalloc's source files. If we don't specify CPPFLAGS, then at least
+ // on ARM that step fails with a "Missing implementation for 32-bit
+ // atomic operations" error. This is because no "-march" flag will be
+ // passed to GCC, and then GCC won't define the
+ // "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4" macro that jemalloc needs to
+ // select an atomic operation implementation.
+ .env("CPPFLAGS", cflags.clone())
.env("AR", &ar)
.env("RANLIB", format!("{} s", ar.display()));
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
-//! For example, these:
//! For example, the following calls all print the same thing, `Hello x is 0.01000`:
//!
//! ```
-//! // Hello {arg 0 (x)} is {arg 1 (0.01) with precision specified inline (5)}
+//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
-//! // Hello {arg 1 (x)} is {arg 2 (0.01) with precision specified in arg 0 (5)}
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
-//! // Hello {arg 0 (x)} is {arg 2 (0.01) with precision specified in arg 1 (5)}
+//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
-//! // Hello {next arg (x)} is {second of next two args (0.01) with precision
+//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
-//! // Hello {next arg (x)} is {arg 2 (0.01) with precision
+//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
-//! // Hello {next arg (x)} is {arg "number" (0.01) with precision specified
+//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
-//! All print the same thing:
-//!
-//! ```text
-//! Hello x is 0.01000
-//! ```
-//!
//! While these:
//!
//! ```
#![feature(specialization)]
#![feature(staged_api)]
#![feature(step_by)]
-#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag)]
/// ```
/// use std::collections::LinkedList;
///
- /// let mut a = LinkedList::new();
- /// let mut b = LinkedList::new();
- /// a.push_back(1);
- /// a.push_back(2);
- /// b.push_back(3);
- /// b.push_back(4);
+ /// let mut list1 = LinkedList::new();
+ /// list1.push_back('a');
///
- /// a.append(&mut b);
+ /// let mut list2 = LinkedList::new();
+ /// list2.push_back('b');
+ /// list2.push_back('c');
///
- /// for e in &a {
- /// println!("{}", e); // prints 1, then 2, then 3, then 4
- /// }
- /// println!("{}", b.len()); // prints 0
+ /// list1.append(&mut list2);
+ ///
+ /// let mut iter = list1.iter();
+ /// assert_eq!(iter.next(), Some(&'a'));
+ /// assert_eq!(iter.next(), Some(&'b'));
+ /// assert_eq!(iter.next(), Some(&'c'));
+ /// assert!(iter.next().is_none());
+ ///
+ /// assert!(list2.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn append(&mut self, other: &mut Self) {
///
/// # Example
///
- /// Print the slice two elements at a time (i.e. `[1,2]`,
- /// `[3,4]`, `[5]`):
- ///
- /// ```rust
- /// let v = &[1, 2, 3, 4, 5];
- ///
- /// for chunk in v.chunks(2) {
- /// println!("{:?}", chunk);
- /// }
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks(2);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert_eq!(iter.next().unwrap(), &['m']);
+ /// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
}
}
+#[stable(feature = "stringfromchars", since = "1.12.0")]
+impl<'a> From<&'a [char]> for String {
+ #[inline]
+ fn from(v: &'a [char]) -> String {
+ let mut s = String::with_capacity(v.len());
+ for c in v {
+ s.push(*c);
+ }
+ s
+ }
+}
+
+#[stable(feature = "stringfromchars", since = "1.12.0")]
+impl From<Vec<char>> for String {
+ #[inline]
+ fn from(v: Vec<char>) -> String {
+ String::from(v.as_slice())
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Write for String {
#[inline]
assert_eq!(&s[0..], "abcประเทศไทย中华Việt Nam");
}
+#[test]
+fn test_add_assign() {
+ let mut s = String::new();
+ s += "";
+ assert_eq!(s.as_str(), "");
+ s += "abc";
+ assert_eq!(s.as_str(), "abc");
+ s += "ประเทศไทย中华Việt Nam";
+ assert_eq!(s.as_str(), "abcประเทศไทย中华Việt Nam");
+}
+
#[test]
fn test_push() {
let mut data = String::from("ประเทศไทย中");
/// This will invoke the `panic!` macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
+/// Assertions are always checked in both debug and release builds, and cannot
+/// be disabled. See `debug_assert!` for assertions that are not enabled in
+/// release builds by default.
+///
+/// Unsafe code relies on `assert!` to enforce run-time invariants that, if
+/// violated, could lead to unsafety.
+///
+/// Other use-cases of `assert!` include
+/// [testing](https://doc.rust-lang.org/book/testing.html) and enforcing
+/// run-time invariants in safe code (whose violation cannot result in unsafety).
+///
/// This macro has a second version, where a custom panic message can be provided.
///
/// # Examples
/// expensive to be present in a release build but may be helpful during
/// development.
///
+/// An unchecked assertion allows a program in an inconsistent state to keep
+/// running, which might have unexpected consequences but does not introduce
+/// unsafety as long as this only happens in safe code. The performance cost
+/// of assertions, however, is not measurable in general. Replacing `assert!`
+/// with `debug_assert!` is thus only encouraged after thorough profiling, and
+/// more importantly, only in safe code!
+///
/// # Examples
///
/// ```
if b {None} else {Some(a)}
}
+ /// Checked absolute value. Computes `self.abs()`, returning `None` if
+ /// `self == MIN`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(no_panic_abs)]
+ ///
+ /// use std::i32;
+ ///
+ /// assert_eq!((-5i32).checked_abs(), Some(5));
+ /// assert_eq!(i32::MIN.checked_abs(), None);
+ /// ```
+ #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[inline]
+ pub fn checked_abs(self) -> Option<Self> {
+ if self.is_negative() {
+ self.checked_neg()
+ } else {
+ Some(self)
+ }
+ }
+
/// Saturating integer addition. Computes `self + other`, saturating at
/// the numeric bounds instead of overflowing.
///
self.overflowing_shr(rhs).0
}
+ /// Wrapping (modular) absolute value. Computes `self.abs()`,
+ /// wrapping around at the boundary of the type.
+ ///
+    /// The only case where such wrapping can occur is when one takes
+    /// the absolute value of the negative minimal value for the type;
+    /// this is a positive value that is too large to represent in the
+    /// type. In such a case, this function returns `MIN` itself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(no_panic_abs)]
+ ///
+ /// assert_eq!(100i8.wrapping_abs(), 100);
+ /// assert_eq!((-100i8).wrapping_abs(), 100);
+ /// assert_eq!((-128i8).wrapping_abs(), -128);
+ /// assert_eq!((-128i8).wrapping_abs() as u8, 128);
+ /// ```
+ #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[inline(always)]
+ pub fn wrapping_abs(self) -> Self {
+ if self.is_negative() {
+ self.wrapping_neg()
+ } else {
+ self
+ }
+ }
+
/// Calculates `self` + `rhs`
///
/// Returns a tuple of the addition along with a boolean indicating
(self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
}
+ /// Computes the absolute value of `self`.
+ ///
+ /// Returns a tuple of the absolute version of self along with a
+ /// boolean indicating whether an overflow happened. If self is the
+ /// minimum value (e.g. i32::MIN for values of type i32), then the
+ /// minimum value will be returned again and true will be returned for
+ /// an overflow happening.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(no_panic_abs)]
+ ///
+ /// assert_eq!(10i8.overflowing_abs(), (10,false));
+ /// assert_eq!((-10i8).overflowing_abs(), (10,false));
+ /// assert_eq!((-128i8).overflowing_abs(), (-128,true));
+ /// ```
+ #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[inline]
+ pub fn overflowing_abs(self) -> (Self, bool) {
+ if self.is_negative() {
+ self.overflowing_neg()
+ } else {
+ (self, false)
+ }
+ }
+
/// Raises self to the power of `exp`, using exponentiation by squaring.
///
/// # Examples
///
/// This has the same lifetime as the original slice, and so the
/// iterator can continue to be used while this exists.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut chars = "abc".chars();
+ ///
+ /// assert_eq!(chars.as_str(), "abc");
+ /// chars.next();
+ /// assert_eq!(chars.as_str(), "bc");
+ /// chars.next();
+ /// chars.next();
+ /// assert_eq!(chars.as_str(), "");
+ /// ```
#[stable(feature = "iter_to_slice", since = "1.4.0")]
#[inline]
pub fn as_str(&self) -> &'a str {
#![feature(step_by)]
#![feature(test)]
#![feature(try_from)]
-#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
[build-dependencies]
build_helper = { path = "../build_helper" }
-gcc = "0.3"
+gcc = "0.3.27"
-Subproject commit b0d62534d48b711c8978d1bbe8cca0558ae7b1cb
+Subproject commit 5066b7dcab7e700844b0e2ba71b8af9dc627a59b
}
}
}
- // Ip is not present in the table. This should not hapen... but it does: issie #35011.
+ // Ip is not present in the table. This should not happen... but it does: issue #35011.
// So rather than returning EHAction::Terminate, we do this.
EHAction::None
} else {
use alloc::boxed::Box;
use unwind as uw;
+use libc::{c_int, uintptr_t};
+use dwarf::eh::{self, EHContext, EHAction};
#[repr(C)]
struct Exception {
0x4d4f5a_00_52555354
}
-// All targets, except ARM which uses a slightly different ABI (however, iOS goes here as it uses
-// SjLj unwinding). Also, 64-bit Windows implementation lives in seh64_gnu.rs
-#[cfg(all(any(target_os = "ios", not(target_arch = "arm"))))]
-pub mod eabi {
- use unwind as uw;
- use libc::{c_int, uintptr_t};
- use dwarf::eh::{EHContext, EHAction, find_eh_action};
- // Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister()
- // and TargetLowering::getExceptionSelectorRegister() for each architecture,
- // then mapped to DWARF register numbers via register definition tables
- // (typically <arch>RegisterInfo.td, search for "DwarfRegNum").
- // See also http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register.
+// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister()
+// and TargetLowering::getExceptionSelectorRegister() for each architecture,
+// then mapped to DWARF register numbers via register definition tables
+// (typically <arch>RegisterInfo.td, search for "DwarfRegNum").
+// See also http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register.
- #[cfg(target_arch = "x86")]
- const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX
+#[cfg(target_arch = "x86")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX
- #[cfg(target_arch = "x86_64")]
- const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX
+#[cfg(target_arch = "x86_64")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX
- #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
- const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1
- #[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
- const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1
+#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1
- #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
- const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4
+#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
+const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4
- // Based on GCC's C and C++ personality routines. For reference, see:
- // https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc
- // https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c
- #[lang = "eh_personality"]
- #[no_mangle]
- #[allow(unused)]
- unsafe extern "C" fn rust_eh_personality(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- exception_object: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- if version != 1 {
- return uw::_URC_FATAL_PHASE1_ERROR;
- }
- let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8;
- let mut ip_before_instr: c_int = 0;
- let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr);
- let eh_context = EHContext {
- // The return address points 1 byte past the call instruction,
- // which could be in the next IP range in LSDA range table.
- ip: if ip_before_instr != 0 { ip } else { ip - 1 },
- func_start: uw::_Unwind_GetRegionStart(context),
- get_text_start: &|| uw::_Unwind_GetTextRelBase(context),
- get_data_start: &|| uw::_Unwind_GetDataRelBase(context),
- };
- let eh_action = find_eh_action(lsda, &eh_context);
+// The following code is based on GCC's C and C++ personality routines. For reference, see:
+// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc
+// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c
- if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 {
- match eh_action {
- EHAction::None | EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND,
- EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND,
- EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR,
- }
- } else {
- match eh_action {
- EHAction::None => return uw::_URC_CONTINUE_UNWIND,
- EHAction::Cleanup(lpad) | EHAction::Catch(lpad) => {
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t);
- uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
- uw::_Unwind_SetIP(context, lpad);
- return uw::_URC_INSTALL_CONTEXT;
- }
- EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR,
+// The personality routine for most of our targets, except ARM, which has a slightly different ABI
+// (however, iOS goes here as it uses SjLj unwinding). Also, the 64-bit Windows implementation
+// lives in seh64_gnu.rs
+#[cfg(all(any(target_os = "ios", not(target_arch = "arm"))))]
+#[lang = "eh_personality"]
+#[no_mangle]
+#[allow(unused)]
+unsafe extern "C" fn rust_eh_personality(version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ if version != 1 {
+ return uw::_URC_FATAL_PHASE1_ERROR;
+ }
+ let eh_action = find_eh_action(context);
+ if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 {
+ match eh_action {
+ EHAction::None | EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND,
+ EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND,
+ EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR,
+ }
+ } else {
+ match eh_action {
+ EHAction::None => return uw::_URC_CONTINUE_UNWIND,
+ EHAction::Cleanup(lpad) | EHAction::Catch(lpad) => {
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t);
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetIP(context, lpad);
+ return uw::_URC_INSTALL_CONTEXT;
}
+ EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR,
}
}
-
- #[cfg(stage0)]
- #[lang = "eh_personality_catch"]
- #[no_mangle]
- pub unsafe extern "C" fn rust_eh_personality_catch(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- rust_eh_personality(version, actions, exception_class, ue_header, context)
- }
}
-// ARM EHABI uses a slightly different personality routine signature,
-// but otherwise works the same.
+// ARM EHABI personality routine.
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf
#[cfg(all(target_arch = "arm", not(target_os = "ios")))]
-pub mod eabi {
- use unwind as uw;
- use libc::c_int;
+#[lang = "eh_personality"]
+#[no_mangle]
+unsafe extern "C" fn rust_eh_personality(state: uw::_Unwind_State,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ let state = state as c_int;
+ let action = state & uw::_US_ACTION_MASK as c_int;
+ let search_phase = if action == uw::_US_VIRTUAL_UNWIND_FRAME as c_int {
+ // Backtraces on ARM will call the personality routine with
+ // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
+ // we want to continue unwinding the stack, otherwise all our backtraces
+ // would end at __rust_try
+ if state & uw::_US_FORCE_UNWIND as c_int != 0 {
+ return continue_unwind(exception_object, context)
+ }
+ true
+ } else if action == uw::_US_UNWIND_FRAME_STARTING as c_int {
+ false
+ } else if action == uw::_US_UNWIND_FRAME_RESUME as c_int {
+ return continue_unwind(exception_object, context);
+ } else {
+ return uw::_URC_FAILURE;
+ };
- extern "C" {
- fn __gcc_personality_v0(state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code;
- }
+ // The DWARF unwinder assumes that _Unwind_Context holds things like the function
+ // and LSDA pointers, however ARM EHABI places them into the exception object.
+ // To preserve signatures of functions like _Unwind_GetLanguageSpecificData(), which
+ // take only the context pointer, GCC personality routines stash a pointer to exception_object
+ // in the context, using location reserved for ARM's "scratch register" (r12).
+ uw::_Unwind_SetGR(context, uw::UNWIND_POINTER_REG, exception_object as uw::_Unwind_Ptr);
+ // ...A more principled approach would be to provide the full definition of ARM's
+ // _Unwind_Context in our libunwind bindings and fetch the required data from there directly,
+ // bypassing DWARF compatibility functions.
- #[lang = "eh_personality"]
- #[no_mangle]
- extern "C" fn rust_eh_personality(state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- unsafe { __gcc_personality_v0(state, ue_header, context) }
+ let eh_action = find_eh_action(context);
+ if search_phase {
+ match eh_action {
+ EHAction::None |
+ EHAction::Cleanup(_) => return continue_unwind(exception_object, context),
+ EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND,
+ EHAction::Terminate => return uw::_URC_FAILURE,
+ }
+ } else {
+ match eh_action {
+ EHAction::None => return continue_unwind(exception_object, context),
+ EHAction::Cleanup(lpad) | EHAction::Catch(lpad) => {
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t);
+ uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0);
+ uw::_Unwind_SetIP(context, lpad);
+ return uw::_URC_INSTALL_CONTEXT;
+ }
+ EHAction::Terminate => return uw::_URC_FAILURE,
+ }
}
- #[lang = "eh_personality_catch"]
- #[no_mangle]
- pub extern "C" fn rust_eh_personality_catch(state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- // Backtraces on ARM will call the personality routine with
- // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. In those cases
- // we want to continue unwinding the stack, otherwise all our backtraces
- // would end at __rust_try.
- if (state as c_int & uw::_US_ACTION_MASK as c_int) ==
- uw::_US_VIRTUAL_UNWIND_FRAME as c_int &&
- (state as c_int & uw::_US_FORCE_UNWIND as c_int) == 0 {
- // search phase
- uw::_URC_HANDLER_FOUND // catch!
+ // On ARM EHABI the personality routine is responsible for actually
+ // unwinding a single stack frame before returning (ARM EHABI Sec. 6.1).
+ unsafe fn continue_unwind(exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ if __gnu_unwind_frame(exception_object, context) == uw::_URC_NO_REASON {
+ uw::_URC_CONTINUE_UNWIND
} else {
- // cleanup phase
- unsafe { __gcc_personality_v0(state, ue_header, context) }
+ uw::_URC_FAILURE
}
}
+ // defined in libgcc
+ extern "C" {
+ fn __gnu_unwind_frame(exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code;
+ }
+}
+
+unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> EHAction {
+ let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8;
+ let mut ip_before_instr: c_int = 0;
+ let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr);
+ let eh_context = EHContext {
+ // The return address points 1 byte past the call instruction,
+        // which could be in the next IP range in the LSDA range table.
+ ip: if ip_before_instr != 0 { ip } else { ip - 1 },
+ func_start: uw::_Unwind_GetRegionStart(context),
+ get_text_start: &|| uw::_Unwind_GetTextRelBase(context),
+ get_data_start: &|| uw::_Unwind_GetDataRelBase(context),
+ };
+ eh::find_eh_action(lsda, &eh_context)
+}
+
+// *** Delete after a new snapshot ***
+#[cfg(all(stage0, any(target_os = "ios", not(target_arch = "arm"))))]
+#[lang = "eh_personality_catch"]
+#[no_mangle]
+pub unsafe extern "C" fn rust_eh_personality_catch(version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ rust_eh_personality(version, actions, exception_class, ue_header, context)
+}
+
+// *** Delete after a new snapshot ***
+#[cfg(all(stage0, target_arch = "arm", not(target_os = "ios")))]
+#[lang = "eh_personality_catch"]
+#[no_mangle]
+pub unsafe extern "C" fn rust_eh_personality_catch(state: uw::_Unwind_State,
+ ue_header: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ rust_eh_personality(state, ue_header, context)
}
// See docs in the `unwind` module.
// This is considered acceptable, because the behavior of throwing exceptions
// through a C ABI boundary is undefined.
+// *** Delete after a new snapshot ***
#[cfg(stage0)]
#[lang = "eh_personality_catch"]
#[cfg(not(test))]
// except according to those terms.
use std::fmt::Debug;
+use std::sync::Arc;
macro_rules! try_opt {
($e:expr) => (
// in an extern crate.
MetaData(D),
+ // Represents some artifact that we save to disk. Note that these
+ // do not have a def-id as part of their identifier.
+ WorkProduct(Arc<WorkProductId>),
+
// Represents different phases in the compiler.
CrateReader,
CollectLanguageItems,
TransCrate => Some(TransCrate),
TransWriteMetadata => Some(TransWriteMetadata),
LinkBinary => Some(LinkBinary),
+
+ // work product names do not need to be mapped, because
+ // they are always absolute.
+ WorkProduct(ref id) => Some(WorkProduct(id.clone())),
+
Hir(ref d) => op(d).map(Hir),
MetaData(ref d) => op(d).map(MetaData),
CollectItem(ref d) => op(d).map(CollectItem),
}
}
}
+
+/// A "work product" corresponds to a `.o` (or other) file that we
+/// save in between runs. These ids do not have a DefId but rather
+/// some independent path or string that persists between runs without
+/// the need to be mapped or unmapped. (This ensures we can serialize
+/// them even in the absence of a tcx.)
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub struct WorkProductId(pub String);
+
// except according to those terms.
use hir::def_id::DefId;
+use rustc_data_structures::fnv::FnvHashMap;
+use session::config::OutputType;
+use std::cell::{Ref, RefCell};
use std::rc::Rc;
+use std::sync::Arc;
-use super::dep_node::DepNode;
+use super::dep_node::{DepNode, WorkProductId};
use super::query::DepGraphQuery;
use super::raii;
use super::thread::{DepGraphThreadData, DepMessage};
#[derive(Clone)]
pub struct DepGraph {
- data: Rc<DepGraphThreadData>
+ data: Rc<DepGraphData>
+}
+
+struct DepGraphData {
+ /// We send messages to the thread to let it build up the dep-graph
+ /// from the current run.
+ thread: DepGraphThreadData,
+
+ /// When we load, there may be `.o` files, cached mir, or other such
+ /// things available to us. If we find that they are not dirty, we
+ /// load the path to the file storing those work-products here into
+ /// this map. We can later look for and extract that data.
+ previous_work_products: RefCell<FnvHashMap<Arc<WorkProductId>, WorkProduct>>,
+
+ /// Work-products that we generate in this run.
+ work_products: RefCell<FnvHashMap<Arc<WorkProductId>, WorkProduct>>,
}
impl DepGraph {
pub fn new(enabled: bool) -> DepGraph {
DepGraph {
- data: Rc::new(DepGraphThreadData::new(enabled))
+ data: Rc::new(DepGraphData {
+ thread: DepGraphThreadData::new(enabled),
+ previous_work_products: RefCell::new(FnvHashMap()),
+ work_products: RefCell::new(FnvHashMap())
+ })
}
}
/// then the other methods on this `DepGraph` will have no net effect.
#[inline]
pub fn enabled(&self) -> bool {
- self.data.enabled()
+ self.data.thread.enabled()
}
pub fn query(&self) -> DepGraphQuery<DefId> {
- self.data.query()
+ self.data.thread.query()
}
pub fn in_ignore<'graph>(&'graph self) -> raii::IgnoreTask<'graph> {
- raii::IgnoreTask::new(&self.data)
+ raii::IgnoreTask::new(&self.data.thread)
}
pub fn in_task<'graph>(&'graph self, key: DepNode<DefId>) -> raii::DepTask<'graph> {
- raii::DepTask::new(&self.data, key)
+ raii::DepTask::new(&self.data.thread, key)
}
pub fn with_ignore<OP,R>(&self, op: OP) -> R
}
pub fn read(&self, v: DepNode<DefId>) {
- self.data.enqueue(DepMessage::Read(v));
+ self.data.thread.enqueue(DepMessage::Read(v));
}
pub fn write(&self, v: DepNode<DefId>) {
- self.data.enqueue(DepMessage::Write(v));
+ self.data.thread.enqueue(DepMessage::Write(v));
+ }
+
+ /// Indicates that a previous work product exists for `v`. This is
+ /// invoked during initial start-up based on what nodes are clean
+ /// (and what files exist in the incr. directory).
+ pub fn insert_previous_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ debug!("insert_previous_work_product({:?}, {:?})", v, data);
+ self.data.previous_work_products.borrow_mut()
+ .insert(v.clone(), data);
+ }
+
+ /// Indicates that we created the given work-product in this run
+ /// for `v`. This record will be preserved and loaded in the next
+ /// run.
+ pub fn insert_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ debug!("insert_work_product({:?}, {:?})", v, data);
+ self.data.work_products.borrow_mut()
+ .insert(v.clone(), data);
}
+
+ /// Check whether a previous work product exists for `v` and, if
+ /// so, return the path that leads to it. Used to skip doing work.
+ pub fn previous_work_product(&self, v: &Arc<WorkProductId>) -> Option<WorkProduct> {
+ self.data.previous_work_products.borrow()
+ .get(v)
+ .cloned()
+ }
+
+ /// Access the map of work-products created during this run. Only
+ /// used during saving of the dep-graph.
+ pub fn work_products(&self) -> Ref<FnvHashMap<Arc<WorkProductId>, WorkProduct>> {
+ self.data.work_products.borrow()
+ }
+}
+
+/// A "work product" is an intermediate result that we save into the
+/// incremental directory for later re-use. The primary example are
+/// the object files that we save for each partition at code
+/// generation time.
+///
+/// Each work product is associated with a dep-node, representing the
+/// process that produced the work-product. If that dep-node is found
+/// to be dirty when we load up, then we will delete the work-product
+/// at load time. If the work-product is found to be clean, then we
+/// will keep a record in the `previous_work_products` list.
+///
+/// In addition, work products have an associated hash. This hash is
+/// an extra hash that can be used to decide if the work-product from
+/// a previous compilation can be re-used (in addition to the dirty
+/// edges check).
+///
+/// As the primary example, consider the object files we generate for
+/// each partition. In the first run, we create partitions based on
+/// the symbols that need to be compiled. For each partition P, we
+/// hash the symbols in P and create a `WorkProduct` record associated
+/// with `DepNode::TransPartition(P)`; the hash is the set of symbols
+/// in P.
+///
+/// The next time we compile, if the `DepNode::TransPartition(P)` is
+/// judged to be clean (which means none of the things we read to
+/// generate the partition were found to be dirty), it will be loaded
+/// into previous work products. We will then regenerate the set of
+/// symbols in the partition P and hash them (note that new symbols
+/// may be added -- for example, new monomorphizations -- even if
+/// nothing in P changed!). We will compare that hash against the
+/// previous hash. If it matches up, we can reuse the object file.
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub struct WorkProduct {
+ /// Extra hash used to decide if work-product is still suitable;
+ /// note that this is *not* a hash of the work-product itself.
+ /// See documentation on `WorkProduct` type for an example.
+ pub input_hash: u64,
+
+ /// Saved files associated with this CGU
+ pub saved_files: Vec<(OutputType, String)>,
}
pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
pub use self::dep_node::DepNode;
+pub use self::dep_node::WorkProductId;
pub use self::graph::DepGraph;
+pub use self::graph::WorkProduct;
pub use self::query::DepGraphQuery;
pub use self::visit::visit_all_items_in_krate;
pub use self::raii::DepTask;
///////////////////////////////////////////////////////////////////////////
+ fn visit_id(&mut self, _node_id: NodeId) {
+ // Nothing to do.
+ }
fn visit_name(&mut self, _span: Span, _name: Name) {
// Nothing to do.
}
- fn visit_mod(&mut self, m: &'v Mod, _s: Span, _n: NodeId) {
- walk_mod(self, m)
+ fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: NodeId) {
+ walk_mod(self, m, n)
}
fn visit_foreign_item(&mut self, i: &'v ForeignItem) {
walk_foreign_item(self, i)
fn visit_where_predicate(&mut self, predicate: &'v WherePredicate) {
walk_where_predicate(self, predicate)
}
- fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _: NodeId) {
- walk_fn(self, fk, fd, b, s)
+ fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, id: NodeId) {
+ walk_fn(self, fk, fd, b, s, id)
}
fn visit_trait_item(&mut self, ti: &'v TraitItem) {
walk_trait_item(self, ti)
s: &'v VariantData,
_: Name,
_: &'v Generics,
- _: NodeId,
+ _parent_id: NodeId,
_: Span) {
walk_struct_def(self, s)
}
}
pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) {
+ visitor.visit_id(macro_def.id);
visitor.visit_name(macro_def.span, macro_def.name);
walk_opt_name(visitor, macro_def.span, macro_def.imported_from);
walk_list!(visitor, visit_attribute, ¯o_def.attrs);
}
-pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod) {
+pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_node_id: NodeId) {
+ visitor.visit_id(mod_node_id);
for &item_id in &module.item_ids {
visitor.visit_nested_item(item_id);
}
}
pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) {
+ visitor.visit_id(local.id);
visitor.visit_pat(&local.pat);
walk_list!(visitor, visit_ty, &local.ty);
walk_list!(visitor, visit_expr, &local.init);
}
pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) {
+ visitor.visit_id(lifetime.id);
visitor.visit_name(lifetime.span, lifetime.name);
}
pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef)
where V: Visitor<'v>
{
+ visitor.visit_id(trait_ref.ref_id);
visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
}
visitor.visit_name(item.span, item.name);
match item.node {
ItemExternCrate(opt_name) => {
+ visitor.visit_id(item.id);
walk_opt_name(visitor, item.span, opt_name)
}
ItemUse(ref vp) => {
+ visitor.visit_id(item.id);
match vp.node {
ViewPathSimple(name, ref path) => {
visitor.visit_name(vp.span, name);
}
ItemStatic(ref typ, _, ref expr) |
ItemConst(ref typ, ref expr) => {
+ visitor.visit_id(item.id);
visitor.visit_ty(typ);
visitor.visit_expr(expr);
}
item.id)
}
ItemMod(ref module) => {
+ // visit_mod() takes care of visiting the Item's NodeId
visitor.visit_mod(module, item.span, item.id)
}
ItemForeignMod(ref foreign_module) => {
+ visitor.visit_id(item.id);
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
ItemTy(ref typ, ref type_parameters) => {
+ visitor.visit_id(item.id);
visitor.visit_ty(typ);
visitor.visit_generics(type_parameters)
}
ItemEnum(ref enum_definition, ref type_parameters) => {
visitor.visit_generics(type_parameters);
+ // visit_enum_def() takes care of visiting the Item's NodeId
visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span)
}
ItemDefaultImpl(_, ref trait_ref) => {
+ visitor.visit_id(item.id);
visitor.visit_trait_ref(trait_ref)
}
ItemImpl(_, _, ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => {
+ visitor.visit_id(item.id);
visitor.visit_generics(type_parameters);
walk_list!(visitor, visit_trait_ref, opt_trait_reference);
visitor.visit_ty(typ);
}
ItemStruct(ref struct_definition, ref generics) => {
visitor.visit_generics(generics);
+ visitor.visit_id(item.id);
visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span);
}
ItemTrait(_, ref generics, ref bounds, ref methods) => {
+ visitor.visit_id(item.id);
visitor.visit_generics(generics);
walk_list!(visitor, visit_ty_param_bound, bounds);
walk_list!(visitor, visit_trait_item, methods);
enum_definition: &'v EnumDef,
generics: &'v Generics,
item_id: NodeId) {
+ visitor.visit_id(item_id);
walk_list!(visitor,
visit_variant,
&enum_definition.variants,
pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V,
variant: &'v Variant,
generics: &'v Generics,
- item_id: NodeId) {
+ parent_item_id: NodeId) {
visitor.visit_name(variant.span, variant.node.name);
visitor.visit_variant_data(&variant.node.data,
variant.node.name,
generics,
- item_id,
+ parent_item_id,
variant.span);
walk_list!(visitor, visit_expr, &variant.node.disr_expr);
walk_list!(visitor, visit_attribute, &variant.node.attrs);
}
pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
+ visitor.visit_id(typ.id);
+
match typ.node {
TyVec(ref ty) => {
visitor.visit_ty(ty)
pub fn walk_path_list_item<'v, V: Visitor<'v>>(visitor: &mut V,
_prefix: &'v Path,
item: &'v PathListItem) {
+ visitor.visit_id(item.node.id());
walk_opt_name(visitor, item.span, item.node.name());
walk_opt_name(visitor, item.span, item.node.rename());
}
pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V,
type_binding: &'v TypeBinding) {
+ visitor.visit_id(type_binding.id);
visitor.visit_name(type_binding.span, type_binding.name);
visitor.visit_ty(&type_binding.ty);
}
pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) {
+ visitor.visit_id(pattern.id);
match pattern.node {
PatKind::TupleStruct(ref path, ref children, _) => {
visitor.visit_path(path, pattern.id);
}
pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) {
+ visitor.visit_id(foreign_item.id);
visitor.visit_vis(&foreign_item.vis);
visitor.visit_name(foreign_item.span, foreign_item.name);
pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) {
for param in &generics.ty_params {
+ visitor.visit_id(param.id);
visitor.visit_name(param.span, param.name);
walk_list!(visitor, visit_ty_param_bound, ¶m.bounds);
walk_list!(visitor, visit_ty, ¶m.default);
}
walk_list!(visitor, visit_lifetime_def, &generics.lifetimes);
+ visitor.visit_id(generics.where_clause.id);
walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates);
}
ref path,
ref ty,
..}) => {
+ visitor.visit_id(id);
visitor.visit_path(path, id);
visitor.visit_ty(ty);
}
pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
for argument in &function_declaration.inputs {
+ visitor.visit_id(argument.id);
visitor.visit_pat(&argument.pat);
visitor.visit_ty(&argument.ty)
}
pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
for argument in &function_declaration.inputs {
+ visitor.visit_id(argument.id);
visitor.visit_ty(&argument.ty)
}
walk_fn_ret_ty(visitor, &function_declaration.output)
function_kind: FnKind<'v>,
function_declaration: &'v FnDecl,
function_body: &'v Block,
- _span: Span) {
+ _span: Span,
+ id: NodeId) {
+ visitor.visit_id(id);
walk_fn_decl(visitor, function_declaration);
walk_fn_kind(visitor, function_kind);
visitor.visit_block(function_body)
walk_list!(visitor, visit_attribute, &trait_item.attrs);
match trait_item.node {
ConstTraitItem(ref ty, ref default) => {
+ visitor.visit_id(trait_item.id);
visitor.visit_ty(ty);
walk_list!(visitor, visit_expr, default);
}
MethodTraitItem(ref sig, None) => {
+ visitor.visit_id(trait_item.id);
visitor.visit_generics(&sig.generics);
walk_fn_decl(visitor, &sig.decl);
}
trait_item.id);
}
TypeTraitItem(ref bounds, ref default) => {
+ visitor.visit_id(trait_item.id);
walk_list!(visitor, visit_ty_param_bound, bounds);
walk_list!(visitor, visit_ty, default);
}
walk_list!(visitor, visit_attribute, &impl_item.attrs);
match impl_item.node {
ImplItemKind::Const(ref ty, ref expr) => {
+ visitor.visit_id(impl_item.id);
visitor.visit_ty(ty);
visitor.visit_expr(expr);
}
impl_item.id);
}
ImplItemKind::Type(ref ty) => {
+ visitor.visit_id(impl_item.id);
visitor.visit_ty(ty);
}
}
}
pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) {
+ visitor.visit_id(struct_definition.id());
walk_list!(visitor, visit_struct_field, struct_definition.fields());
}
pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) {
+ visitor.visit_id(struct_field.id);
visitor.visit_vis(&struct_field.vis);
visitor.visit_name(struct_field.span, struct_field.name);
visitor.visit_ty(&struct_field.ty);
}
pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) {
+ visitor.visit_id(block.id);
walk_list!(visitor, visit_stmt, &block.stmts);
walk_list!(visitor, visit_expr, &block.expr);
}
pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) {
match statement.node {
- StmtDecl(ref declaration, _) => visitor.visit_decl(declaration),
- StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
+ StmtDecl(ref declaration, id) => {
+ visitor.visit_id(id);
+ visitor.visit_decl(declaration)
+ }
+ StmtExpr(ref expression, id) |
+ StmtSemi(ref expression, id) => {
+ visitor.visit_id(id);
visitor.visit_expr(expression)
}
}
}
pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
+ visitor.visit_id(expression.id);
match expression.node {
ExprBox(ref subexpression) => {
visitor.visit_expr(subexpression)
pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) {
if let Visibility::Restricted { ref path, id } = *vis {
+ visitor.visit_id(id);
visitor.visit_path(path, id)
}
}
-#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug, PartialEq, Eq)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
self.min >= self.max
}
+ pub fn contains(&self, id: NodeId) -> bool {
+ id >= self.min && id < self.max
+ }
+
pub fn add(&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, id + 1);
}
-}
-pub trait IdVisitingOperation {
- fn visit_id(&mut self, node_id: NodeId);
}
+
pub struct IdRangeComputingVisitor {
pub result: IdRange,
}
}
}
-impl IdVisitingOperation for IdRangeComputingVisitor {
+impl<'v> Visitor<'v> for IdRangeComputingVisitor {
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
-pub struct IdVisitor<'a, O: 'a> {
- operation: &'a mut O,
-
- // In general, the id visitor visits the contents of an item, but
- // not including nested trait/impl items, nor other nested items.
- // The base visitor itself always skips nested items, but not
- // trait/impl items. This means in particular that if you start by
- // visiting a trait or an impl, you should not visit the
- // trait/impl items respectively. This is handled by setting
- // `skip_members` to true when `visit_item` is on the stack. This
- // way, if the user begins by calling `visit_trait_item`, we will
- // visit the trait item, but if they begin with `visit_item`, we
- // won't visit the (nested) trait items.
- skip_members: bool,
-}
-
-impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> {
- pub fn new(operation: &'a mut O) -> IdVisitor<'a, O> {
- IdVisitor { operation: operation, skip_members: false }
- }
-
- fn visit_generics_helper(&mut self, generics: &Generics) {
- for type_parameter in generics.ty_params.iter() {
- self.operation.visit_id(type_parameter.id)
- }
- for lifetime in &generics.lifetimes {
- self.operation.visit_id(lifetime.lifetime.id)
- }
- }
-}
-
-impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
- fn visit_mod(&mut self, module: &Mod, _: Span, node_id: NodeId) {
- self.operation.visit_id(node_id);
- walk_mod(self, module)
- }
-
- fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
- self.operation.visit_id(foreign_item.id);
- walk_foreign_item(self, foreign_item)
- }
-
- fn visit_item(&mut self, item: &Item) {
- assert!(!self.skip_members);
- self.skip_members = true;
-
- self.operation.visit_id(item.id);
- match item.node {
- ItemUse(ref view_path) => {
- match view_path.node {
- ViewPathSimple(_, _) |
- ViewPathGlob(_) => {}
- ViewPathList(_, ref paths) => {
- for path in paths {
- self.operation.visit_id(path.node.id())
- }
- }
- }
- }
- _ => {}
- }
- walk_item(self, item);
-
- self.skip_members = false;
- }
-
- fn visit_local(&mut self, local: &Local) {
- self.operation.visit_id(local.id);
- walk_local(self, local)
- }
-
- fn visit_block(&mut self, block: &Block) {
- self.operation.visit_id(block.id);
- walk_block(self, block)
- }
-
- fn visit_stmt(&mut self, statement: &Stmt) {
- self.operation.visit_id(statement.node.id());
- walk_stmt(self, statement)
- }
-
- fn visit_pat(&mut self, pattern: &Pat) {
- self.operation.visit_id(pattern.id);
- walk_pat(self, pattern)
- }
-
- fn visit_expr(&mut self, expression: &Expr) {
- self.operation.visit_id(expression.id);
- walk_expr(self, expression)
- }
-
- fn visit_ty(&mut self, typ: &Ty) {
- self.operation.visit_id(typ.id);
- walk_ty(self, typ)
- }
-
- fn visit_generics(&mut self, generics: &Generics) {
- self.visit_generics_helper(generics);
- walk_generics(self, generics)
- }
-
- fn visit_fn(&mut self,
- function_kind: FnKind<'v>,
- function_declaration: &'v FnDecl,
- block: &'v Block,
- span: Span,
- node_id: NodeId) {
- self.operation.visit_id(node_id);
-
- match function_kind {
- FnKind::ItemFn(_, generics, _, _, _, _, _) => {
- self.visit_generics_helper(generics)
- }
- FnKind::Method(_, sig, _, _) => {
- self.visit_generics_helper(&sig.generics)
- }
- FnKind::Closure(_) => {}
- }
-
- for argument in &function_declaration.inputs {
- self.operation.visit_id(argument.id)
- }
-
- walk_fn(self, function_kind, function_declaration, block, span);
- }
-
- fn visit_struct_field(&mut self, struct_field: &StructField) {
- self.operation.visit_id(struct_field.id);
- walk_struct_field(self, struct_field)
- }
-
- fn visit_variant_data(&mut self,
- struct_def: &VariantData,
- _: Name,
- _: &Generics,
- _: NodeId,
- _: Span) {
- self.operation.visit_id(struct_def.id());
- walk_struct_def(self, struct_def);
- }
-
- fn visit_trait_item(&mut self, ti: &TraitItem) {
- if !self.skip_members {
- self.operation.visit_id(ti.id);
- walk_trait_item(self, ti);
- }
- }
-
- fn visit_impl_item(&mut self, ii: &ImplItem) {
- if !self.skip_members {
- self.operation.visit_id(ii.id);
- walk_impl_item(self, ii);
- }
- }
-
- fn visit_lifetime(&mut self, lifetime: &Lifetime) {
- self.operation.visit_id(lifetime.id);
- }
-
- fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
- self.visit_lifetime(&def.lifetime);
- }
-
- fn visit_trait_ref(&mut self, trait_ref: &TraitRef) {
- self.operation.visit_id(trait_ref.ref_id);
- walk_trait_ref(self, trait_ref);
- }
-}
-
/// Computes the id range for a single fn body, ignoring nested items.
pub fn compute_id_range_for_fn_body(fk: FnKind,
decl: &FnDecl,
sp: Span,
id: NodeId)
-> IdRange {
- let mut visitor = IdRangeComputingVisitor { result: IdRange::max() };
- let mut id_visitor = IdVisitor::new(&mut visitor);
- id_visitor.visit_fn(fk, decl, body, sp, id);
- id_visitor.operation.result
+ let mut visitor = IdRangeComputingVisitor::new();
+ visitor.visit_fn(fk, decl, body, sp, id);
+ visitor.result()
}
fn visit_fn(&mut self, fk: intravisit::FnKind<'ast>, fd: &'ast FnDecl,
b: &'ast Block, s: Span, id: NodeId) {
assert_eq!(self.parent_node, id);
- intravisit::walk_fn(self, fk, fd, b, s);
+ intravisit::walk_fn(self, fk, fd, b, s, id);
}
fn visit_block(&mut self, block: &'ast Block) {
use arena::TypedArena;
use std::cell::RefCell;
+use std::cmp;
use std::io;
use std::mem;
EntryStructCtor(id, _) => id,
EntryLifetime(id, _) => id,
EntryTyParam(id, _) => id,
- _ => return None
+
+ NotPresent |
+ RootCrate |
+ RootInlinedParent(_) => return None,
})
}
map: RefCell<Vec<MapEntry<'ast>>>,
definitions: RefCell<Definitions>,
+
+    /// All NodeIds that are numerically greater than or equal to this value
+    /// come from inlined items.
+ local_node_id_watermark: NodeId,
}
impl<'ast> Map<'ast> {
}
}
+ pub fn expect_inlined_item(&self, id: NodeId) -> &'ast InlinedItem {
+ match self.find_entry(id) {
+ Some(RootInlinedParent(inlined_item)) => inlined_item,
+ _ => bug!("expected inlined item, found {}", self.node_to_string(id)),
+ }
+ }
+
/// Returns the name associated with the given NodeId's AST.
pub fn name(&self, id: NodeId) -> Name {
match self.get(id) {
pub fn node_to_user_string(&self, id: NodeId) -> String {
node_id_to_string(self, id, false)
}
+
+ pub fn is_inlined(&self, id: NodeId) -> bool {
+ id >= self.local_node_id_watermark
+ }
}
pub struct NodesMatchingSuffix<'a, 'ast:'a> {
}
/// A Folder that updates IDs and Span's according to fold_ops.
-struct IdAndSpanUpdater<F> {
- fold_ops: F
+pub struct IdAndSpanUpdater<F> {
+ fold_ops: F,
+ min_id_assigned: NodeId,
+ max_id_assigned: NodeId,
+}
+
+impl<F: FoldOps> IdAndSpanUpdater<F> {
+ pub fn new(fold_ops: F) -> IdAndSpanUpdater<F> {
+ IdAndSpanUpdater {
+ fold_ops: fold_ops,
+ min_id_assigned: ::std::u32::MAX,
+ max_id_assigned: ::std::u32::MIN,
+ }
+ }
+
+ pub fn id_range(&self) -> intravisit::IdRange {
+ intravisit::IdRange {
+ min: self.min_id_assigned,
+ max: self.max_id_assigned + 1,
+ }
+ }
}
impl<F: FoldOps> Folder for IdAndSpanUpdater<F> {
fn new_id(&mut self, id: NodeId) -> NodeId {
- self.fold_ops.new_id(id)
+ let id = self.fold_ops.new_id(id);
+
+ self.min_id_assigned = cmp::min(self.min_id_assigned, id);
+ self.max_id_assigned = cmp::max(self.max_id_assigned, id);
+
+ id
}
fn new_span(&mut self, span: Span) -> Span {
entries, vector_length, (entries as f64 / vector_length as f64) * 100.);
}
+ let local_node_id_watermark = map.len() as NodeId;
+
Map {
forest: forest,
dep_graph: forest.dep_graph.clone(),
map: RefCell::new(map),
definitions: RefCell::new(definitions),
+ local_node_id_watermark: local_node_id_watermark
}
}
ii: InlinedItem,
fold_ops: F)
-> &'ast InlinedItem {
- let mut fld = IdAndSpanUpdater { fold_ops: fold_ops };
+ let mut fld = IdAndSpanUpdater::new(fold_ops);
let ii = match ii {
II::Item(i) => II::Item(i.map(|i| fld.fold_item(i))),
II::TraitItem(d, ti) => {
let ii = map.forest.inlined_items.alloc(ii);
let ii_parent_id = fld.new_id(DUMMY_NODE_ID);
+ // Assert that the ii_parent_id is the last NodeId in our reserved range
+ assert!(ii_parent_id == fld.max_id_assigned);
+ // Assert that we did not violate the invariant that all inlined HIR items
+ // have NodeIds greater than or equal to `local_node_id_watermark`
+ assert!(fld.min_id_assigned >= map.local_node_id_watermark);
+
let defs = &mut *map.definitions.borrow_mut();
let mut def_collector = DefCollector::extend(ii_parent_id,
parent_def_path.clone(),
/// TraitRef's appear in impls.
///
/// resolve maps each TraitRef's ref_id to its defining trait; that's all
-/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
-/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
-/// same as the impl's node id).
+/// that the ref_id is for. Note that ref_id's value is not the NodeId of the
+/// trait being referred to but just a unique NodeId that serves as a key
+/// within the DefMap.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct TraitRef {
pub path: Path,
use middle::privacy::AccessLevels;
use ty::TyCtxt;
use session::{config, early_error, Session};
-use lint::{Level, LevelSource, Lint, LintId, LintPass};
+use lint::{Level, LevelSource, Lint, LintId, LintPass, LintSource};
use lint::{EarlyLintPassObject, LateLintPassObject};
use lint::{Default, CommandLine, Node, Allow, Warn, Deny, Forbid};
use lint::builtin;
use errors::DiagnosticBuilder;
use hir;
use hir::intravisit as hir_visit;
-use hir::intravisit::{IdVisitor, IdVisitingOperation};
use syntax::visit as ast_visit;
/// Information about the registered lints.
attr::mark_used(attr);
let meta = &attr.node.value;
- let metas = match meta.node {
- ast::MetaItemKind::List(_, ref metas) => metas,
- _ => {
- out.push(Err(meta.span));
- return out;
- }
+ let metas = if let Some(metas) = meta.meta_item_list() {
+ metas
+ } else {
+ out.push(Err(meta.span));
+ return out;
};
for meta in metas {
- out.push(match meta.node {
- ast::MetaItemKind::Word(ref lint_name) => Ok((lint_name.clone(), level, meta.span)),
- _ => Err(meta.span),
+ out.push(if meta.is_word() {
+ Ok((meta.name().clone(), level, meta.span))
+ } else {
+ Err(meta.span)
});
}
};
for (lint_id, level, span) in v {
- let now = self.lints().get_level_source(lint_id).0;
+ let (now, now_source) = self.lints().get_level_source(lint_id);
if now == Forbid && level != Forbid {
let lint_name = lint_id.as_str();
- span_err!(self.sess(), span, E0453,
- "{}({}) overruled by outer forbid({})",
- level.as_str(), lint_name,
- lint_name);
+ let mut diag_builder = struct_span_err!(self.sess(), span, E0453,
+ "{}({}) overruled by outer forbid({})",
+ level.as_str(), lint_name,
+ lint_name);
+ match now_source {
+ LintSource::Default => &mut diag_builder,
+ LintSource::Node(forbid_source_span) => {
+ diag_builder.span_note(forbid_source_span,
+ "`forbid` lint level set here")
+ },
+ LintSource::CommandLine => {
+ diag_builder.note("`forbid` lint level was set on command line")
+ }
+ }.emit()
} else if now != level {
let src = self.lints().get_level_source(lint_id).1;
self.level_stack().push((lint_id, (now, src)));
}
fn visit_ids<F>(&mut self, f: F)
- where F: FnOnce(&mut IdVisitor<LateContext>)
+ where F: FnOnce(&mut IdVisitor)
{
- let mut v = IdVisitor::new(self);
+ let mut v = IdVisitor {
+ cx: self
+ };
f(&mut v);
}
}
fn visit_fn(&mut self, fk: hir_visit::FnKind<'v>, decl: &'v hir::FnDecl,
body: &'v hir::Block, span: Span, id: ast::NodeId) {
run_lints!(self, check_fn, late_passes, fk, decl, body, span, id);
- hir_visit::walk_fn(self, fk, decl, body, span);
+ hir_visit::walk_fn(self, fk, decl, body, span, id);
run_lints!(self, check_fn_post, late_passes, fk, decl, body, span, id);
}
fn visit_mod(&mut self, m: &hir::Mod, s: Span, n: ast::NodeId) {
run_lints!(self, check_mod, late_passes, m, s, n);
- hir_visit::walk_mod(self, m);
+ hir_visit::walk_mod(self, m, n);
run_lints!(self, check_mod_post, late_passes, m, s, n);
}
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) {
self.with_lint_attrs(&trait_item.attrs, |cx| {
run_lints!(cx, check_trait_item, late_passes, trait_item);
- cx.visit_ids(|v| v.visit_trait_item(trait_item));
+ cx.visit_ids(|v| hir_visit::walk_trait_item(v, trait_item));
hir_visit::walk_trait_item(cx, trait_item);
run_lints!(cx, check_trait_item_post, late_passes, trait_item);
});
fn visit_impl_item(&mut self, impl_item: &hir::ImplItem) {
self.with_lint_attrs(&impl_item.attrs, |cx| {
run_lints!(cx, check_impl_item, late_passes, impl_item);
- cx.visit_ids(|v| v.visit_impl_item(impl_item));
+ cx.visit_ids(|v| hir_visit::walk_impl_item(v, impl_item));
hir_visit::walk_impl_item(cx, impl_item);
run_lints!(cx, check_impl_item_post, late_passes, impl_item);
});
}
}
+/// Visitor that flushes buffered lints: for each visited `NodeId`, any lints
+/// queued in the session under that id are removed and reported through the
+/// wrapped `LateContext`.
+struct IdVisitor<'a, 'b: 'a, 'tcx: 'a+'b> {
+    cx: &'a mut LateContext<'b, 'tcx>
+}
+
// Output any lints that were previously added to the session.
-impl<'a, 'tcx> IdVisitingOperation for LateContext<'a, 'tcx> {
+impl<'a, 'b, 'tcx, 'v> hir_visit::Visitor<'v> for IdVisitor<'a, 'b, 'tcx> {
+
    fn visit_id(&mut self, id: ast::NodeId) {
-        if let Some(lints) = self.sess().lints.borrow_mut().remove(&id) {
+        // Drain (remove, not just read) the lints buffered for this id so
+        // each one is emitted exactly once.
+        if let Some(lints) = self.cx.sess().lints.borrow_mut().remove(&id) {
            debug!("LateContext::visit_id: id={:?} lints={:?}", id, lints);
            for (lint_id, span, msg) in lints {
-                self.span_lint(lint_id.lint, span, &msg[..])
+                self.cx.span_lint(lint_id.lint, span, &msg[..])
            }
        }
    }
+
+    fn visit_trait_item(&mut self, _ti: &hir::TraitItem) {
+        // Do not recurse into trait or impl items automatically. These are
+        // processed separately by calling hir_visit::walk_trait_item()
+    }
+
+    fn visit_impl_item(&mut self, _ii: &hir::ImplItem) {
+        // See visit_trait_item()
+    }
}
enum CheckLintNameResult {
// Visit the whole crate.
cx.with_lint_attrs(&krate.attrs, |cx| {
- cx.visit_id(ast::CRATE_NODE_ID);
cx.visit_ids(|v| {
hir_visit::walk_crate(v, krate);
});
use syntax_pos::Span;
use rustc_back::target::Target;
use hir;
-use hir::intravisit::{IdVisitor, IdVisitingOperation, Visitor};
+use hir::intravisit::Visitor;
pub use self::DefLike::{DlDef, DlField, DlImpl};
pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown};
pub vis: ty::Visibility,
}
-pub enum FoundAst<'ast> {
- Found(&'ast InlinedItem),
- FoundParent(DefId, &'ast hir::Item),
- NotFound,
-}
-
#[derive(Copy, Clone, Debug)]
pub struct ExternCrate {
/// def_id of an `extern crate` in the current crate that caused
// misc. metadata
fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
- -> FoundAst<'tcx>;
+ -> Option<(&'tcx InlinedItem, ast::NodeId)>;
+ fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option<ast::NodeId>;
+ fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option<DefId>;
+
fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> Option<Mir<'tcx>>;
fn is_item_mir_available(&self, def: DefId) -> bool;
InlinedItem::ImplItem(_, ref ii) => visitor.visit_impl_item(ii),
}
}
-
- pub fn visit_ids<O: IdVisitingOperation>(&self, operation: &mut O) {
- let mut id_visitor = IdVisitor::new(operation);
- self.visit(&mut id_visitor);
- }
}
// FIXME: find a better place for this?
// misc. metadata
fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
- -> FoundAst<'tcx> { bug!("maybe_get_item_ast") }
+ -> Option<(&'tcx InlinedItem, ast::NodeId)> {
+ bug!("maybe_get_item_ast")
+ }
+    // NOTE(review): these defaults just abort via bug!(); presumably the real
+    // metadata crate store overrides them — confirm against its impl.
+    fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option<ast::NodeId> {
+        bug!("local_node_for_inlined_defid")
+    }
+    fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option<DefId> {
+        bug!("defid_for_inlined_node")
+    }
+
fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> Option<Mir<'tcx>> { bug!("maybe_get_item_mir") }
fn is_item_mir_available(&self, def: DefId) -> bool {
impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fn_kind: FnKind<'v>, fn_decl: &'v hir::FnDecl,
- block: &'v hir::Block, span: Span, _: ast::NodeId) {
+ block: &'v hir::Block, span: Span, id: ast::NodeId) {
let (is_item_fn, is_unsafe_fn) = match fn_kind {
FnKind::ItemFn(_, _, unsafety, _, _, _, _) =>
self.unsafe_context = UnsafeContext::new(SafeContext)
}
- intravisit::walk_fn(self, fn_kind, fn_decl, block, span);
+ intravisit::walk_fn(self, fn_kind, fn_decl, block, span, id);
self.unsafe_context = old_unsafe_context
}
impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
pub fn new(delegate: &'a mut (Delegate<'tcx>+'a),
- infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>)
+ -> Self
+ {
+ ExprUseVisitor::with_options(delegate, infcx, mc::MemCategorizationOptions::default())
+ }
+
+ pub fn with_options(delegate: &'a mut (Delegate<'tcx>+'a),
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ options: mc::MemCategorizationOptions)
+ -> Self
{
ExprUseVisitor {
- mc: mc::MemCategorizationContext::new(infcx),
+ mc: mc::MemCategorizationContext::with_options(infcx, options),
delegate: delegate
}
}
StartFnLangItem, "start", start_fn;
EhPersonalityLangItem, "eh_personality", eh_personality;
- EhPersonalityCatchLangItem, "eh_personality_catch", eh_personality_catch;
EhUnwindResumeLangItem, "eh_unwind_resume", eh_unwind_resume;
MSVCTryFilterLangItem, "msvc_try_filter", msvc_try_filter;
// gather up the various local variables, significant expressions,
// and so forth:
- intravisit::walk_fn(&mut fn_maps, fk, decl, body, sp);
+ intravisit::walk_fn(&mut fn_maps, fk, decl, body, sp, id);
// Special nodes and variables:
// - exit_ln represents the end of the fn, either by return or panic
#[derive(Copy, Clone)]
pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
    pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+    options: MemCategorizationOptions,
+}
+
+/// Behavior switches for `MemCategorizationContext`; `Default` yields the
+/// strict settings (all flags false).
+#[derive(Copy, Clone, Default)]
+pub struct MemCategorizationOptions {
+    // If true, then when analyzing a closure upvar, if the closure
+    // has a missing kind, we treat it like a Fn closure. When false,
+    // we ICE if the closure has a missing kind. Should be false
+    // except during closure kind inference. It is used by the
+    // mem-categorization code to be able to have stricter assertions
+    // (which are always true except during upvar inference).
+    pub during_closure_kind_inference: bool,
}
pub type McResult<T> = Result<T, ()>;
impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>)
-> MemCategorizationContext<'a, 'gcx, 'tcx> {
- MemCategorizationContext { infcx: infcx }
+ MemCategorizationContext::with_options(infcx, MemCategorizationOptions::default())
+ }
+
+ pub fn with_options(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ options: MemCategorizationOptions)
+ -> MemCategorizationContext<'a, 'gcx, 'tcx> {
+ MemCategorizationContext {
+ infcx: infcx,
+ options: options,
+ }
}
fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
self.cat_upvar(id, span, var_id, fn_node_id, kind)
}
None => {
- span_bug!(
- span,
- "No closure kind for {:?}",
- closure_id);
+ if !self.options.during_closure_kind_inference {
+ span_bug!(
+ span,
+ "No closure kind for {:?}",
+ closure_id);
+ }
+
+ // during closure kind inference, we
+ // don't know the closure kind yet, but
+ // it's ok because we detect that we are
+ // accessing an upvar and handle that
+ // case specially anyhow. Use Fn
+ // arbitrarily.
+ self.cat_upvar(id, span, var_id, fn_node_id, ty::ClosureKind::Fn)
}
}
}
use syntax::parse::token::InternedString;
use syntax::feature_gate::UnstableFeatures;
-use errors::{ColorConfig, Handler};
+use errors::{ColorConfig, FatalError, Handler};
use getopts;
use std::collections::HashMap;
FullDebugInfo,
}
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
pub enum OutputType {
Bitcode,
Assembly,
OutputType::DepInfo => "dep-info",
}
}
+
+    /// The file extension conventionally used for this kind of output
+    /// (e.g. "bc" for bitcode, "o" for objects); empty for executables.
+    pub fn extension(&self) -> &'static str {
+        match *self {
+            OutputType::Bitcode => "bc",
+            OutputType::Assembly => "s",
+            OutputType::LlvmAssembly => "ll",
+            OutputType::Object => "o",
+            OutputType::DepInfo => "d",
+            OutputType::Exe => "",
+        }
+    }
}
#[derive(Clone)]
flavor: OutputType,
codegen_unit_name: Option<&str>)
-> PathBuf {
- let extension = match flavor {
- OutputType::Bitcode => "bc",
- OutputType::Assembly => "s",
- OutputType::LlvmAssembly => "ll",
- OutputType::Object => "o",
- OutputType::DepInfo => "d",
- OutputType::Exe => "",
- };
-
+ let extension = flavor.extension();
self.temp_path_ext(extension, codegen_unit_name)
}
self.debugging_opts.dump_dep_graph ||
self.debugging_opts.query_dep_graph
}
+
+    /// True when everything is emitted as a single codegen unit: either
+    /// incremental compilation is disabled or only one codegen unit was
+    /// requested.
+    pub fn single_codegen_unit(&self) -> bool {
+        self.incremental.is_none() ||
+        self.cg.codegen_units == 1
+    }
}
// The type of entry function, so
pub const parse_bool: Option<&'static str> = None;
pub const parse_opt_bool: Option<&'static str> =
Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
+ pub const parse_all_bool: Option<&'static str> =
+ Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
pub const parse_string: Option<&'static str> = Some("a string");
pub const parse_opt_string: Option<&'static str> = Some("a string");
pub const parse_list: Option<&'static str> = Some("a space-separated list of strings");
}
}
+    /// Parses "y"/"yes"/"on" as true and "n"/"no"/"off" as false into `slot`.
+    /// A bare flag (no value) means true. Returns false (parse failure) for
+    /// any other string.
+    fn parse_all_bool(slot: &mut bool, v: Option<&str>) -> bool {
+        match v {
+            Some(s) => {
+                match s {
+                    "n" | "no" | "off" => {
+                        *slot = false;
+                    }
+                    "y" | "yes" | "on" => {
+                        *slot = true;
+                    }
+                    _ => { return false; }
+                }
+
+                true
+            },
+            None => { *slot = true; true }
+        }
+    }
+
fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool {
match v {
Some(s) => { *slot = Some(s.to_string()); true },
"panic strategy to compile crate with"),
}
-
options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
build_debugging_options, "Z", "debugging",
DB_OPTIONS, db_type_desc, dbsetters,
"dump MIR state at various points in translation"),
dump_mir_dir: Option<String> = (None, parse_opt_string,
"the directory the MIR is dumped into"),
- orbit: bool = (false, parse_bool,
+ orbit: bool = (true, parse_all_bool,
"get MIR where it belongs - everywhere; most importantly, in orbit"),
}
let target = match Target::search(&opts.target_triple) {
Ok(t) => t,
Err(e) => {
- panic!(sp.fatal(&format!("Error loading target specification: {}", e)));
+ sp.struct_fatal(&format!("Error loading target specification: {}", e))
+ .help("Use `--print target-list` for a list of built-in targets")
+ .emit();
+ panic!(FatalError);
}
};
// forms a unique global identifier for the crate. It is used to allow
// multiple crates with the same name to coexist. See the
// trans::back::symbol_names module for more information.
- pub crate_disambiguator: Cell<ast::Name>,
+ pub crate_disambiguator: RefCell<token::InternedString>,
pub features: RefCell<feature_gate::Features>,
/// The maximum recursion limit for potentially infinitely recursive
}
impl Session {
+ pub fn local_crate_disambiguator(&self) -> token::InternedString {
+ self.crate_disambiguator.borrow().clone()
+ }
pub fn struct_span_warn<'a, S: Into<MultiSpan>>(&'a self,
sp: S,
msg: &str)
plugin_attributes: RefCell::new(Vec::new()),
crate_types: RefCell::new(Vec::new()),
dependency_formats: RefCell::new(FnvHashMap()),
- crate_disambiguator: Cell::new(token::intern("")),
+ crate_disambiguator: RefCell::new(token::intern("").as_str()),
features: RefCell::new(feature_gate::Features::new()),
recursion_limit: Cell::new(64),
next_node_id: Cell::new(1),
pub fn crate_disambiguator(self, cnum: ast::CrateNum) -> token::InternedString {
if cnum == LOCAL_CRATE {
- self.sess.crate_disambiguator.get().as_str()
+ self.sess.local_crate_disambiguator()
} else {
self.sess.cstore.crate_disambiguator(cnum)
}
pat_util::arm_contains_ref_binding(arm)
}
+    /// Returns true if `ty` is a struct or enum with at least one field
+    /// whose type is `TyError`. Non-ADT types always yield false.
+    pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
+        match ty.sty {
+            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                for field in def.all_fields() {
+                    let field_ty = field.ty(self, substs);
+                    if let TyError = field_ty.sty {
+                        return true;
+                    }
+                }
+            }
+            _ => ()
+        }
+        false
+    }
+
/// Returns the type of element at index `i` in tuple or tuple-like type `t`.
/// For an enum `t`, `variant` is None only if `t` is a univariant enum.
pub fn positional_element_ty(self,
use std::path::{self, Path, PathBuf};
use std::ffi::OsString;
+use std::fs;
+use std::io;
// Unfortunately, on windows, it looks like msvcrt.dll is silently translating
// verbatim paths under the hood to non-verbatim paths! This manifests itself as
_ => p.to_path_buf(),
}
}
+
+/// Copy `p` into `q`, preferring to use hard-linking if possible. If
+/// `q` already exists, it is removed first. Any I/O error from the
+/// removal, link, or copy is returned.
+pub fn link_or_copy<P: AsRef<Path>, Q: AsRef<Path>>(p: P, q: Q) -> io::Result<()> {
+    let p = p.as_ref();
+    let q = q.as_ref();
+    if q.exists() {
+        try!(fs::remove_file(&q));
+    }
+    // Hard-linking can fail (e.g. across filesystems); fall back to a full copy.
+    fs::hard_link(p, q)
+        .or_else(|_| fs::copy(p, q).map(|_| ()))
+}
ty::TyVar(ref vid) if print_var_ids => write!(f, "{:?}", vid),
ty::IntVar(ref vid) if print_var_ids => write!(f, "{:?}", vid),
ty::FloatVar(ref vid) if print_var_ids => write!(f, "{:?}", vid),
- ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => write!(f, "_"),
+ ty::TyVar(_) => write!(f, "_"),
+ ty::IntVar(_) => write!(f, "{}", "{integer}"),
+ ty::FloatVar(_) => write!(f, "{}", "{float}"),
ty::FreshTy(v) => write!(f, "FreshTy({})", v),
ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v),
ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
use super::apple_ios_base::{opts, Arch};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ let base = try!(opts(Arch::Arm64));
+ Ok(Target {
llvm_target: "arm64-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
features: "+neon,+fp-armv8,+cyclone".to_string(),
eliminate_frame_pointer: false,
max_atomic_width: 128,
- .. opts(Arch::Arm64)
+ .. base
},
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.max_atomic_width = 128;
// As documented in http://developer.android.com/ndk/guides/cpu-features.html
// the neon (ASIMD) and FP must exist on all android aarch64 targets.
base.features = "+neon,+fp-armv8".to_string();
- Target {
+ Ok(Target {
llvm_target: "aarch64-linux-android".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
- data_layout: "e-m:e-i64:64-i128:128-n32:64-S128".to_string(),
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "android".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.max_atomic_width = 128;
- Target {
+ Ok(Target {
llvm_target: "aarch64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "gnu".to_string(),
- data_layout: "e-m:e-i64:64-i128:128-n32:64-S128".to_string(),
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "linux".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
}
}
-pub fn get_sdk_root(sdk_name: &str) -> String {
+pub fn get_sdk_root(sdk_name: &str) -> Result<String, String> {
let res = Command::new("xcrun")
.arg("--show-sdk-path")
.arg("-sdk")
});
match res {
- Ok(output) => output.trim().to_string(),
- Err(e) => panic!("failed to get {} SDK path: {}", sdk_name, e)
+ Ok(output) => Ok(output.trim().to_string()),
+ Err(e) => Err(format!("failed to get {} SDK path: {}", sdk_name, e))
}
}
-fn pre_link_args(arch: Arch) -> Vec<String> {
+fn build_pre_link_args(arch: Arch) -> Result<Vec<String>, String> {
let sdk_name = match arch {
Armv7 | Armv7s | Arm64 => "iphoneos",
I386 | X86_64 => "iphonesimulator"
let arch_name = arch.to_string();
- vec!["-arch".to_string(), arch_name.to_string(),
- "-Wl,-syslibroot".to_string(), get_sdk_root(sdk_name)]
+ let sdk_root = try!(get_sdk_root(sdk_name));
+
+ Ok(vec!["-arch".to_string(), arch_name.to_string(),
+ "-Wl,-syslibroot".to_string(), sdk_root])
}
fn target_cpu(arch: Arch) -> String {
}.to_string()
}
-pub fn opts(arch: Arch) -> TargetOptions {
- TargetOptions {
+pub fn opts(arch: Arch) -> Result<TargetOptions, String> {
+ let pre_link_args = try!(build_pre_link_args(arch));
+ Ok(TargetOptions {
cpu: target_cpu(arch),
dynamic_linking: false,
executables: true,
- pre_link_args: pre_link_args(arch),
+ pre_link_args: pre_link_args,
has_elf_tls: false,
.. super::apple_base::opts()
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.features = "+v7,+vfp3,+d16".to_string();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "arm-linux-androideabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "arm-unknown-linux-gnueabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
features: "+v6".to_string(),
.. base
},
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
features: "+v6,+vfp2".to_string(),
.. base
}
- }
+ })
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Target specification for `arm-unknown-linux-musleabi`: ARMv6 Linux
+//! using the musl C library.
+
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+    let mut base = super::linux_musl_base::opts();
+
+    // Most of these settings are copied from the arm_unknown_linux_gnueabi
+    // target.
+    base.features = "+v6".to_string();
+    base.max_atomic_width = 64;
+    Ok(Target {
+        // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
+        // to determine the calling convention and float ABI, and it doesn't
+        // support the "musleabi" value.
+        llvm_target: "arm-unknown-linux-gnueabi".to_string(),
+        target_endian: "little".to_string(),
+        target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+        arch: "arm".to_string(),
+        target_os: "linux".to_string(),
+        target_env: "musl".to_string(),
+        target_vendor: "unknown".to_string(),
+        options: base,
+    })
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Target specification for `arm-unknown-linux-musleabihf`: ARMv6 + VFP2
+//! Linux using the musl C library (hard-float "eabihf" variant).
+
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+    let mut base = super::linux_musl_base::opts();
+
+    // Most of these settings are copied from the arm_unknown_linux_gnueabihf
+    // target.
+    base.features = "+v6,+vfp2".to_string();
+    base.max_atomic_width = 64;
+    Ok(Target {
+        // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
+        // uses it to determine the calling convention and float ABI, and it
+        // doesn't support the "musleabihf" value.
+        llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
+        target_endian: "little".to_string(),
+        target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+        arch: "arm".to_string(),
+        target_os: "linux".to_string(),
+        target_env: "musl".to_string(),
+        target_vendor: "unknown".to_string(),
+        options: base,
+    })
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
use super::apple_ios_base::{opts, Arch};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ let base = try!(opts(Arch::Armv7));
+ Ok(Target {
llvm_target: "armv7-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
options: TargetOptions {
features: "+v7,+vfp3,+neon".to_string(),
max_atomic_width: 64,
- .. opts(Arch::Armv7)
+ .. base
}
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.features = "+v7,+thumb2,+vfp3,+d16".to_string();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "armv7-none-linux-android".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let base = super::linux_base::opts();
- Target {
+ Ok(Target {
llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
max_atomic_width: 64,
.. base
}
- }
+ })
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Target specification for `armv7-unknown-linux-musleabihf`: ARMv7
+//! (VFP3 + NEON, cortex-a8 baseline) Linux using the musl C library.
+
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+    let mut base = super::linux_musl_base::opts();
+
+    // Most of these settings are copied from the armv7_unknown_linux_gnueabihf
+    // target.
+    base.features = "+v7,+vfp3,+neon".to_string();
+    base.cpu = "cortex-a8".to_string();
+    base.max_atomic_width = 64;
+    Ok(Target {
+        // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
+        // uses it to determine the calling convention and float ABI, and LLVM
+        // doesn't support the "musleabihf" value.
+        llvm_target: "armv7-unknown-linux-gnueabihf".to_string(),
+        target_endian: "little".to_string(),
+        target_pointer_width: "32".to_string(),
+        data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+        arch: "arm".to_string(),
+        target_os: "linux".to_string(),
+        target_env: "musl".to_string(),
+        target_vendor: "unknown".to_string(),
+        options: base,
+    })
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
use super::apple_ios_base::{opts, Arch};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ let base = try!(opts(Arch::Armv7s));
+ Ok(Target {
llvm_target: "armv7s-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
options: TargetOptions {
features: "+v7,+vfp4,+neon".to_string(),
max_atomic_width: 64,
- .. opts(Arch::Armv7s)
+ .. base
}
- }
+ })
}
use super::{Target, TargetOptions};
-pub fn target() -> Target {
+pub fn target() -> Result<Target, String> {
let opts = TargetOptions {
linker: "emcc".to_string(),
ar: "emar".to_string(),
max_atomic_width: 32,
.. Default::default()
};
- Target {
+ Ok(Target {
llvm_target: "asmjs-unknown-emscripten".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
arch: "asmjs".to_string(),
options: opts,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
use super::apple_ios_base::{opts, Arch};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ let base = try!(opts(Arch::I386));
+ Ok(Target {
llvm_target: "i386-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_vendor: "apple".to_string(),
options: TargetOptions {
max_atomic_width: 64,
- .. opts(Arch::I386)
+ .. base
}
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::TargetResult;
-pub fn target() -> Target {
- let mut base = super::i686_pc_windows_msvc::target();
+pub fn target() -> TargetResult {
+ let mut base = try!(super::i686_pc_windows_msvc::target());
base.options.cpu = "pentium".to_string();
base.llvm_target = "i586-pc-windows-msvc".to_string();
- return base
+ Ok(base)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::TargetResult;
-pub fn target() -> Target {
- let mut base = super::i686_unknown_linux_gnu::target();
+pub fn target() -> TargetResult {
+ let mut base = try!(super::i686_unknown_linux_gnu::target());
base.options.cpu = "pentium".to_string();
base.llvm_target = "i586-unknown-linux-gnu".to_string();
- return base
+ Ok(base)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::apple_base::opts();
base.cpu = "yonah".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m32".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-apple-darwin".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "apple".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.max_atomic_width = 64;
base.cpu = "pentiumpro".to_string();
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".to_string();
- Target {
+ Ok(Target {
llvm_target: "i686-linux-android".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::windows_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
// space available to x86 Windows binaries on x86_64.
base.pre_link_args.push("-Wl,--large-address-aware".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-pc-windows-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "gnu".to_string(),
target_vendor: "pc".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::windows_msvc_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
// https://msdn.microsoft.com/en-us/library/9a89h429.aspx
base.pre_link_args.push("/SAFESEH".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "msvc".to_string(),
target_vendor: "pc".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::dragonfly_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m32".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-unknown-dragonfly".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::freebsd_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m32".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-unknown-freebsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m32".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_musl_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m32".to_string());
base.pre_link_args.push("-Wl,-melf_i386".to_string());
- Target {
+ Ok(Target {
llvm_target: "i686-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_env: "musl".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::{Target, TargetOptions};
+use super::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let opts = TargetOptions {
linker: "pnacl-clang".to_string(),
ar: "pnacl-ar".to_string(),
max_atomic_width: 32,
.. Default::default()
};
- Target {
+ Ok(Target {
llvm_target: "le32-unknown-nacl".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
data_layout: "e-i64:64:64-p:32:32:32-v128:32:32".to_string(),
arch: "le32".to_string(),
options: opts,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ Ok(Target {
llvm_target: "mips-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
max_atomic_width: 32,
..super::linux_base::opts()
},
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ Ok(Target {
llvm_target: "mips-unknown-linux-musl".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
max_atomic_width: 32,
..super::linux_base::opts()
}
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ Ok(Target {
llvm_target: "mipsel-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
max_atomic_width: 32,
..super::linux_base::opts()
},
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ Ok(Target {
llvm_target: "mipsel-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
target_vendor: "unknown".to_string(),
options: TargetOptions {
cpu: "mips32".to_string(),
- features: "+mips32".to_string(),
+ features: "+mips32,+soft-float".to_string(),
max_atomic_width: 32,
..super::linux_base::opts()
}
- }
+ })
}
//! the target's settings, though `target-feature` and `link-args` will *add*
//! to the list specified by the target, rather than replace.
-use serialize::json::Json;
+use serialize::json::{Json, ToJson};
+use std::collections::BTreeMap;
use std::default::Default;
use std::io::prelude::*;
use syntax::abi::Abi;
mod windows_base;
mod windows_msvc_base;
+pub type TargetResult = Result<Target, String>;
+
macro_rules! supported_targets {
( $(($triple:expr, $module:ident)),+ ) => (
$(mod $module;)*
/// List of supported targets
- pub const TARGETS: &'static [&'static str] = &[$($triple),*];
+ const TARGETS: &'static [&'static str] = &[$($triple),*];
- fn load_specific(target: &str) -> Option<Target> {
+ fn load_specific(target: &str) -> TargetResult {
match target {
$(
$triple => {
- let mut t = $module::target();
+ let mut t = try!($module::target());
t.options.is_builtin = true;
+
+ // round-trip through the JSON parser to ensure at
+ // run-time that the parser works correctly
+ t = try!(Target::from_json(t.to_json()));
debug!("Got builtin target: {:?}", t);
- Some(t)
+ Ok(t)
},
)+
- _ => None
+ _ => Err(format!("Unable to find target: {}", target))
}
}
+
+ pub fn get_targets() -> Box<Iterator<Item=String>> {
+ Box::new(TARGETS.iter().filter_map(|t| -> Option<String> {
+ load_specific(t)
+ .map(|t| t.llvm_target)
+ .ok()
+ }))
+ }
+
+ #[cfg(test)]
+ mod test_json_encode_decode {
+ use serialize::json::ToJson;
+ use super::Target;
+ $(use super::$module;)*
+
+ $(
+ #[test]
+ fn $module() {
+ // Grab the TargetResult (a Result<Target, String>). If we successfully retrieved
+ // a Target, then the test JSON encoding/decoding can run for this
+ // Target on this testing platform (i.e., checking the iOS targets
+ // only on a Mac test platform).
+ let _ = $module::target().map(|original| {
+ let as_json = original.to_json();
+ let parsed = Target::from_json(as_json).unwrap();
+ assert_eq!(original, parsed);
+ });
+ }
+ )*
+ }
)
}
("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
+ ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
+ ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
+ ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
("i686-unknown-linux-musl", i686_unknown_linux_musl),
/// Everything `rustc` knows about how to compile for a specific target.
///
/// Every field here must be specified, and has no default value.
-#[derive(Clone, Debug)]
+#[derive(PartialEq, Clone, Debug)]
pub struct Target {
/// Target triple to pass to LLVM.
pub llvm_target: String,
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
-#[derive(Clone, Debug)]
+#[derive(PartialEq, Clone, Debug)]
pub struct TargetOptions {
/// Whether the target is built-in or loaded from a custom target specification.
pub is_builtin: bool,
pub is_like_android: bool,
/// Whether the linker support GNU-like arguments such as -O. Defaults to false.
pub linker_is_gnu: bool,
+ /// The MinGW toolchain has a known issue that prevents it from correctly
+ /// handling COFF object files with more than 2^15 sections. Since each weak
+ /// symbol needs its own COMDAT section, weak linkage implies a large
+ /// number of sections that easily exceeds the given limit for larger
+ /// codebases. Consequently we want a way to disallow weak linkage on some
+ /// platforms.
+ pub allows_weak_linkage: bool,
/// Whether the linker support rpaths or not. Defaults to false.
pub has_rpath: bool,
/// Whether to disable linking to compiler-rt. Defaults to false, as LLVM
is_like_android: false,
is_like_msvc: false,
linker_is_gnu: false,
+ allows_weak_linkage: true,
has_rpath: false,
no_compiler_rt: false,
no_default_libraries: true,
}
/// Load a target descriptor from a JSON object.
- pub fn from_json(obj: Json) -> Target {
- // this is 1. ugly, 2. error prone.
+ pub fn from_json(obj: Json) -> TargetResult {
+ // While ugly, this code must remain this way to retain
+ // compatibility with existing JSON fields and the internal
+ // expected naming of the Target and TargetOptions structs.
+ // To ensure compatibility is retained, the built-in targets
+ // are round-tripped through this code to catch cases where
+ // the JSON parser is not updated to match the structs.
let get_req_field = |name: &str| {
match obj.find(name)
.map(|s| s.as_string())
.and_then(|os| os.map(|s| s.to_string())) {
- Some(val) => val,
+ Some(val) => Ok(val),
None => {
- panic!("Field {} in target specification is required", name)
+ return Err(format!("Field {} in target specification is required", name))
}
}
};
};
let mut base = Target {
- llvm_target: get_req_field("llvm-target"),
- target_endian: get_req_field("target-endian"),
- target_pointer_width: get_req_field("target-pointer-width"),
- data_layout: get_req_field("data-layout"),
- arch: get_req_field("arch"),
- target_os: get_req_field("os"),
+ llvm_target: try!(get_req_field("llvm-target")),
+ target_endian: try!(get_req_field("target-endian")),
+ target_pointer_width: try!(get_req_field("target-pointer-width")),
+ data_layout: try!(get_req_field("data-layout")),
+ arch: try!(get_req_field("arch")),
+ target_os: try!(get_req_field("os")),
target_env: get_opt_field("env", ""),
target_vendor: get_opt_field("vendor", "unknown"),
options: Default::default(),
} );
}
- key!(cpu);
- key!(ar);
+ key!(is_builtin, bool);
key!(linker);
+ key!(ar);
+ key!(pre_link_args, list);
+ key!(pre_link_objects_exe, list);
+ key!(pre_link_objects_dll, list);
+ key!(late_link_args, list);
+ key!(post_link_objects, list);
+ key!(post_link_args, list);
+ key!(cpu);
+ key!(features);
+ key!(dynamic_linking, bool);
+ key!(executables, bool);
key!(relocation_model);
key!(code_model);
+ key!(disable_redzone, bool);
+ key!(eliminate_frame_pointer, bool);
+ key!(function_sections, bool);
key!(dll_prefix);
key!(dll_suffix);
key!(exe_suffix);
key!(staticlib_prefix);
key!(staticlib_suffix);
- key!(features);
- key!(dynamic_linking, bool);
- key!(executables, bool);
- key!(disable_redzone, bool);
- key!(eliminate_frame_pointer, bool);
- key!(function_sections, bool);
key!(target_family, optional);
key!(is_like_osx, bool);
+ key!(is_like_solaris, bool);
key!(is_like_windows, bool);
key!(is_like_msvc, bool);
+ key!(is_like_android, bool);
key!(linker_is_gnu, bool);
+ key!(allows_weak_linkage, bool);
key!(has_rpath, bool);
key!(no_compiler_rt, bool);
key!(no_default_libraries, bool);
- key!(pre_link_args, list);
- key!(post_link_args, list);
+ key!(position_independent_executables, bool);
key!(archive_format);
key!(allow_asm, bool);
key!(custom_unwind_resume, bool);
+ key!(lib_allocation_crate);
+ key!(exe_allocation_crate);
+ key!(has_elf_tls, bool);
+ key!(obj_is_bitcode, bool);
key!(max_atomic_width, u64);
- base
+ Ok(base)
}
/// Search RUST_TARGET_PATH for a JSON file specifying the given target
f.read_to_end(&mut contents).map_err(|e| e.to_string())?;
let obj = json::from_reader(&mut &contents[..])
.map_err(|e| e.to_string())?;
- Ok(Target::from_json(obj))
+ Target::from_json(obj)
}
- if let Some(t) = load_specific(target) {
+ if let Ok(t) = load_specific(target) {
return Ok(t)
}
}
}
+impl ToJson for Target {
+ fn to_json(&self) -> Json {
+ let mut d = BTreeMap::new();
+ let default: TargetOptions = Default::default();
+
+ macro_rules! target_val {
+ ($attr:ident) => ( {
+ let name = (stringify!($attr)).replace("_", "-");
+ d.insert(name.to_string(), self.$attr.to_json());
+ } );
+ ($attr:ident, $key_name:expr) => ( {
+ let name = $key_name;
+ d.insert(name.to_string(), self.$attr.to_json());
+ } );
+ }
+
+ macro_rules! target_option_val {
+ ($attr:ident) => ( {
+ let name = (stringify!($attr)).replace("_", "-");
+ if default.$attr != self.options.$attr {
+ d.insert(name.to_string(), self.options.$attr.to_json());
+ }
+ } );
+ ($attr:ident, $key_name:expr) => ( {
+ let name = $key_name;
+ if default.$attr != self.options.$attr {
+ d.insert(name.to_string(), self.options.$attr.to_json());
+ }
+ } );
+ }
+
+ target_val!(llvm_target);
+ target_val!(target_endian);
+ target_val!(target_pointer_width);
+ target_val!(arch);
+ target_val!(target_os, "os");
+ target_val!(target_env, "env");
+ target_val!(target_vendor, "vendor");
+ target_val!(data_layout);
+
+ target_option_val!(is_builtin);
+ target_option_val!(linker);
+ target_option_val!(ar);
+ target_option_val!(pre_link_args);
+ target_option_val!(pre_link_objects_exe);
+ target_option_val!(pre_link_objects_dll);
+ target_option_val!(late_link_args);
+ target_option_val!(post_link_objects);
+ target_option_val!(post_link_args);
+ target_option_val!(cpu);
+ target_option_val!(features);
+ target_option_val!(dynamic_linking);
+ target_option_val!(executables);
+ target_option_val!(relocation_model);
+ target_option_val!(code_model);
+ target_option_val!(disable_redzone);
+ target_option_val!(eliminate_frame_pointer);
+ target_option_val!(function_sections);
+ target_option_val!(dll_prefix);
+ target_option_val!(dll_suffix);
+ target_option_val!(exe_suffix);
+ target_option_val!(staticlib_prefix);
+ target_option_val!(staticlib_suffix);
+ target_option_val!(target_family);
+ target_option_val!(is_like_osx);
+ target_option_val!(is_like_solaris);
+ target_option_val!(is_like_windows);
+ target_option_val!(is_like_msvc);
+ target_option_val!(is_like_android);
+ target_option_val!(linker_is_gnu);
+ target_option_val!(allows_weak_linkage);
+ target_option_val!(has_rpath);
+ target_option_val!(no_compiler_rt);
+ target_option_val!(no_default_libraries);
+ target_option_val!(position_independent_executables);
+ target_option_val!(archive_format);
+ target_option_val!(allow_asm);
+ target_option_val!(custom_unwind_resume);
+ target_option_val!(lib_allocation_crate);
+ target_option_val!(exe_allocation_crate);
+ target_option_val!(has_elf_tls);
+ target_option_val!(obj_is_bitcode);
+ target_option_val!(max_atomic_width);
+
+ Json::Object(d)
+ }
+}
+
fn maybe_jemalloc() -> String {
if cfg!(feature = "jemalloc") {
"alloc_jemalloc".to_string()
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "ppc64".to_string();
base.pre_link_args.push("-m64".to_string());
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "64".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "ppc64le".to_string();
base.pre_link_args.push("-m64".to_string());
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m32".to_string());
base.max_atomic_width = 32;
- Target {
+ Ok(Target {
llvm_target: "powerpc-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
staticlib_suffix: ".lib".to_string(),
no_default_libraries: true,
is_like_windows: true,
+ allows_weak_linkage: false,
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::apple_base::opts();
base.cpu = "core2".to_string();
base.max_atomic_width = 128; // core2 support cmpxchg16b
base.eliminate_frame_pointer = false;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-apple-darwin".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "apple".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::{Target, TargetOptions};
+use target::{Target, TargetOptions, TargetResult};
use super::apple_ios_base::{opts, Arch};
-pub fn target() -> Target {
- Target {
+pub fn target() -> TargetResult {
+ let base = try!(opts(Arch::X86_64));
+ Ok(Target {
llvm_target: "x86_64-apple-ios".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_vendor: "apple".to_string(),
options: TargetOptions {
max_atomic_width: 64,
- .. opts(Arch::X86_64)
+ .. base
}
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::windows_base::opts();
base.cpu = "x86-64".to_string();
base.pre_link_args.push("-m64".to_string());
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "x86_64-pc-windows-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "gnu".to_string(),
target_vendor: "pc".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::windows_msvc_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "msvc".to_string(),
target_vendor: "pc".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::netbsd_base::opts();
+ base.cpu = "x86-64".to_string();
base.pre_link_args.push("-m64".to_string());
base.linker = "x86_64-rumprun-netbsd-gcc".to_string();
base.ar = "x86_64-rumprun-netbsd-ar".to_string();
base.no_default_libraries = false;
base.exe_allocation_crate = "alloc_system".to_string();
- Target {
+ Ok(Target {
llvm_target: "x86_64-rumprun-netbsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "rumprun".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::solaris_base::opts();
base.pre_link_args.push("-m64".to_string());
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
- Target {
+ Ok(Target {
llvm_target: "x86_64-pc-solaris".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "sun".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::bitrig_base::opts();
+ base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-bitrig".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::dragonfly_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-dragonfly".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::freebsd_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-freebsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::linux_musl_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "musl".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::netbsd_base::opts();
+ base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-netbsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use target::Target;
+use target::{Target, TargetResult};
-pub fn target() -> Target {
+pub fn target() -> TargetResult {
let mut base = super::openbsd_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = 64;
base.pre_link_args.push("-m64".to_string());
- Target {
+ Ok(Target {
llvm_target: "x86_64-unknown-openbsd".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_env: "".to_string(),
target_vendor: "unknown".to_string(),
options: base,
- }
+ })
}
decl,
body);
- intravisit::walk_fn(this, fk, decl, body, sp);
+ intravisit::walk_fn(this, fk, decl, body, sp, id);
}
fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
use rustc::hir;
use rustc::hir::{Pat, PatKind};
-use rustc::hir::intravisit::{self, IdVisitor, IdVisitingOperation, Visitor, FnKind};
+use rustc::hir::intravisit::{self, Visitor, FnKind};
use rustc_back::slice;
use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
renaming_map: &'map mut FnvHashMap<(NodeId, Span), NodeId>
}
-impl<'map> IdVisitingOperation for RenamingRecorder<'map> {
+impl<'v, 'map> Visitor<'v> for RenamingRecorder<'map> {
fn visit_id(&mut self, node_id: NodeId) {
let key = (node_id, self.origin_span);
self.renaming_map.insert(key, self.substituted_node_id);
renaming_map: renaming_map,
};
- let mut id_visitor = IdVisitor::new(&mut renaming_recorder);
-
- id_visitor.visit_expr(const_expr);
+ renaming_recorder.visit_expr(const_expr);
}
}
}
_ => cx.param_env = ParameterEnvironment::for_item(cx.tcx, fn_id),
}
- intravisit::walk_fn(cx, kind, decl, body, sp);
+ intravisit::walk_fn(cx, kind, decl, body, sp, fn_id);
for input in &decl.inputs {
check_irrefutable(cx, &input.pat, true);
use rustc::hir::map as ast_map;
use rustc::hir::map::blocks::FnLikeNode;
-use rustc::middle::cstore::{self, InlinedItem};
+use rustc::middle::cstore::InlinedItem;
use rustc::traits;
use rustc::hir::def::{Def, PathResolution};
use rustc::hir::def_id::DefId;
}
let mut used_substs = false;
let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
- cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node {
+ Some((&InlinedItem::Item(ref item), _)) => match item.node {
hir::ItemConst(ref ty, ref const_expr) => {
Some((&**const_expr, tcx.ast_ty_to_prim_ty(ty)))
},
_ => None
},
- cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node {
+ Some((&InlinedItem::TraitItem(trait_id, ref ti), _)) => match ti.node {
hir::ConstTraitItem(_, _) => {
used_substs = true;
if let Some(substs) = substs {
}
_ => None
},
- cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref ii)) => match ii.node {
+ Some((&InlinedItem::ImplItem(_, ref ii), _)) => match ii.node {
hir::ImplItemKind::Const(ref ty, ref expr) => {
Some((&**expr, tcx.ast_ty_to_prim_ty(ty)))
},
}
let fn_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
- cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => Some(item.id),
- cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref item)) => Some(item.id),
+ Some((&InlinedItem::Item(ref item), _)) => Some(item.id),
+ Some((&InlinedItem::ImplItem(_, ref item), _)) => Some(item.id),
_ => None
};
tcx.extern_const_fns.borrow_mut().insert(def_id,
// We need nested scopes here, because the intermediate results can keep
// large chunks of memory alive and we want to free them as soon as
// possible to keep the peak memory usage low
- let (outputs, trans) = {
+ let (outputs, trans, crate_name) = {
let krate = match phase_1_parse_input(sess, cfg, input) {
Ok(krate) => krate,
Err(mut parse_error) => {
};
let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess);
- let id = link::find_crate_name(Some(sess), &krate.attrs, input);
+ let crate_name = link::find_crate_name(Some(sess), &krate.attrs, input);
let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = {
phase_2_configure_and_expand(
- sess, &cstore, krate, &id, addl_plugins, control.make_glob_map,
+ sess, &cstore, krate, &crate_name, addl_plugins, control.make_glob_map,
|expanded_crate| {
let mut state = CompileState::state_after_expand(
- input, sess, outdir, output, &cstore, expanded_crate, &id,
+ input, sess, outdir, output, &cstore, expanded_crate, &crate_name,
);
controller_entry_point!(after_expand, sess, state, Ok(()));
Ok(())
)?
};
- write_out_deps(sess, &outputs, &id);
+ write_out_deps(sess, &outputs, &crate_name);
let arenas = ty::CtxtArenas::new();
&resolutions,
&expanded_crate,
&hir_map.krate(),
- &id),
+ &crate_name),
Ok(()));
}
analysis,
resolutions,
&arenas,
- &id,
+ &crate_name,
|tcx, mir_map, analysis, result| {
{
// Eventually, we will want to track plugins.
&analysis,
mir_map.as_ref(),
tcx,
- &id);
+ &crate_name);
(control.after_analysis.callback)(&mut state);
if control.after_analysis.stop == Compilation::Stop {
// Discard interned strings as they are no longer required.
token::clear_ident_interner();
- Ok((outputs, trans))
+ Ok((outputs, trans, crate_name.clone()))
})??
};
- let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs);
+ let phase5_result = phase_5_run_llvm_passes(sess, &crate_name, &trans, &outputs);
controller_entry_point!(after_llvm,
sess,
});
*sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs);
- sess.crate_disambiguator.set(token::intern(&compute_crate_disambiguator(sess)));
+ *sess.crate_disambiguator.borrow_mut() =
+ token::intern(&compute_crate_disambiguator(sess)).as_str();
time(time_passes, "recursion limit", || {
middle::recursion_limit::update_recursion_limit(sess, &krate);
/// Run LLVM itself, producing a bitcode file, assembly file or object file
/// as a side effect.
pub fn phase_5_run_llvm_passes(sess: &Session,
+ crate_name: &str,
trans: &trans::CrateTranslation,
outputs: &OutputFilenames) -> CompileResult {
if sess.opts.cg.no_integrated_as {
|| write::run_passes(sess, trans, &sess.opts.output_types, outputs));
}
+ time(sess.time_passes(),
+ "serialize work products",
+ move || rustc_incremental::save_work_products(sess, crate_name));
+
if sess.err_count() > 0 {
Err(sess.err_count())
} else {
filename.replace(" ", "\\ ")
}
-fn write_out_deps(sess: &Session, outputs: &OutputFilenames, id: &str) {
+fn write_out_deps(sess: &Session, outputs: &OutputFilenames, crate_name: &str) {
let mut out_filenames = Vec::new();
for output_type in sess.opts.output_types.keys() {
let file = outputs.path(*output_type);
match *output_type {
OutputType::Exe => {
for output in sess.crate_types.borrow().iter() {
- let p = link::filename_for_input(sess, *output, id, outputs);
+ let p = link::filename_for_input(sess, *output, crate_name, outputs);
out_filenames.push(p);
}
}
#![feature(set_stdio)]
#![feature(staged_api)]
#![feature(question_mark)]
-#![feature(unboxed_closures)]
extern crate arena;
extern crate flate;
use rustc::session::early_error;
use syntax::{ast, json};
+use syntax::attr::AttrMetaMethods;
use syntax::codemap::{CodeMap, FileLoader, RealFileLoader};
use syntax::feature_gate::{GatedCfg, UnstableFeatures};
use syntax::parse::{self, PResult};
let mut saw_invalid_predicate = false;
for item in sopts.cfg.iter() {
- match item.node {
- ast::MetaItemKind::List(ref pred, _) => {
- saw_invalid_predicate = true;
- handler.emit(&MultiSpan::new(),
- &format!("invalid predicate in --cfg command line argument: `{}`",
- pred),
- errors::Level::Fatal);
- }
- _ => {},
+ if item.is_meta_item_list() {
+ saw_invalid_predicate = true;
+ handler.emit(&MultiSpan::new(),
+ &format!("invalid predicate in --cfg command line argument: `{}`",
+ item.name()),
+ errors::Level::Fatal);
}
}
for req in &sess.opts.prints {
match *req {
PrintRequest::TargetList => {
- let mut targets = rustc_back::target::TARGETS.to_vec();
+ let mut targets = rustc_back::target::get_targets().collect::<Vec<String>>();
targets.sort();
println!("{}", targets.join("\n"));
},
if !allow_unstable_cfg && GatedCfg::gate(&*cfg).is_some() {
continue;
}
- match cfg.node {
- ast::MetaItemKind::Word(ref word) => println!("{}", word),
- ast::MetaItemKind::NameValue(ref name, ref value) => {
- println!("{}=\"{}\"", name, match value.node {
- ast::LitKind::Str(ref s, _) => s,
- _ => continue,
- });
+ if cfg.is_word() {
+ println!("{}", cfg.name());
+ } else if cfg.is_value_str() {
+ if let Some(s) = cfg.value_str() {
+ println!("{}=\"{}\"", cfg.name(), s);
}
+ } else if cfg.is_meta_item_list() {
// Right now there are not and should not be any
// MetaItemKind::List items in the configuration returned by
// `build_configuration`.
- ast::MetaItemKind::List(..) => {
- panic!("MetaItemKind::List encountered in default cfg")
- }
+ panic!("MetaItemKind::List encountered in default cfg")
}
}
}
if attr.check_name(IF_THIS_CHANGED) {
let mut id = None;
for meta_item in attr.meta_item_list().unwrap_or_default() {
- match meta_item.node {
- ast::MetaItemKind::Word(ref s) if id.is_none() => id = Some(s.clone()),
- _ => {
- self.tcx.sess.span_err(
- meta_item.span,
- &format!("unexpected meta-item {:?}", meta_item.node));
- }
+ if meta_item.is_word() && id.is_none() {
+ id = Some(meta_item.name().clone());
+ } else {
+ // FIXME better-encapsulate meta_item (don't directly access `node`)
+ span_bug!(meta_item.span(), "unexpected meta-item {:?}", meta_item.node)
}
}
let id = id.unwrap_or(InternedString::new(ID));
let mut dep_node_interned = None;
let mut id = None;
for meta_item in attr.meta_item_list().unwrap_or_default() {
- match meta_item.node {
- ast::MetaItemKind::Word(ref s) if dep_node_interned.is_none() =>
- dep_node_interned = Some(s.clone()),
- ast::MetaItemKind::Word(ref s) if id.is_none() =>
- id = Some(s.clone()),
- _ => {
- self.tcx.sess.span_err(
- meta_item.span,
- &format!("unexpected meta-item {:?}", meta_item.node));
- }
+ if meta_item.is_word() && dep_node_interned.is_none() {
+ dep_node_interned = Some(meta_item.name().clone());
+ } else if meta_item.is_word() && id.is_none() {
+ id = Some(meta_item.name().clone());
+ } else {
+ // FIXME better-encapsulate meta_item (don't directly access `node`)
+ span_bug!(meta_item.span(), "unexpected meta-item {:?}", meta_item.node)
}
}
let dep_node = match dep_node_interned {
//! Calculation of a Strict Version Hash for crates. For a length
//! comment explaining the general idea, see `librustc/middle/svh.rs`.
+use syntax::attr::AttributeMethods;
use std::hash::{Hash, SipHasher, Hasher};
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
use rustc::hir::svh::Svh;
// to ensure it is not incorporating implementation artifacts into
// the hash that are not otherwise visible.)
- let crate_disambiguator = self.sess.crate_disambiguator.get();
+ let crate_disambiguator = self.sess.local_crate_disambiguator();
let krate = self.map.krate();
// FIXME: this should use SHA1, not SipHash. SipHash is not built to
// FIXME(#32753) -- at (*) we `to_le` for endianness, but is
// this enough, and does it matter anyway?
"crate_disambiguator".hash(&mut state);
- crate_disambiguator.as_str().len().to_le().hash(&mut state); // (*)
- crate_disambiguator.as_str().hash(&mut state);
+ crate_disambiguator.len().to_le().hash(&mut state); // (*)
+ crate_disambiguator.hash(&mut state);
- debug!("crate_disambiguator: {:?}", crate_disambiguator.as_str());
+ debug!("crate_disambiguator: {:?}", crate_disambiguator);
debug!("state: {:?}", state);
{
// to avoid hashing the AttrId
for attr in &krate.attrs {
debug!("krate attr {:?}", attr);
- attr.node.value.hash(&mut state);
+ attr.meta().hash(&mut state);
}
Svh::new(state.finish())
use rustc::ty::TyCtxt;
use rustc::hir;
use rustc::hir::*;
+ use rustc::hir::map::DefPath;
use rustc::hir::intravisit as visit;
use rustc::hir::intravisit::{Visitor, FnKind};
-> Self {
StrictVersionHashVisitor { st: st, tcx: tcx }
}
+
+ // Hash a `DefPath` using only name-based data: the crate name and
+ // disambiguator of the path's crate, then each path segment's interned
+ // string plus its disambiguator. No numeric ids are hashed.
+ fn hash_def_path(&mut self, path: &DefPath) {
+ self.tcx.crate_name(path.krate).hash(self.st);
+ self.tcx.crate_disambiguator(path.krate).hash(self.st);
+ for data in &path.data {
+ data.data.as_interned_str().hash(self.st);
+ data.disambiguator.hash(self.st);
+ }
+ }
}
// To off-load the bulk of the hash-computation on #[derive(Hash)],
impl<'a, 'tcx> Visitor<'a> for StrictVersionHashVisitor<'a, 'tcx> {
fn visit_nested_item(&mut self, item: ItemId) {
- debug!("visit_nested_item: {:?} st={:?}", item, self.st);
- let def_path = self.tcx.map.def_path_from_id(item.id);
- def_path.hash(self.st);
+ let def_path = self.tcx.map.def_path_from_id(item.id).unwrap();
+ debug!("visit_nested_item: def_path={:?} st={:?}", def_path, self.st);
+ self.hash_def_path(&def_path);
}
fn visit_variant_data(&mut self, s: &'a VariantData, name: Name,
g: &'a Generics, _: NodeId, _: Span) {
+ debug!("visit_variant_data: st={:?}", self.st);
SawStructDef(name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) {
+ debug!("visit_variant: st={:?}", self.st);
SawVariant.hash(self.st);
// walk_variant does not call walk_generics, so do it here.
visit::walk_generics(self, g);
// pattern, please move that method up above this comment.)
fn visit_name(&mut self, _: Span, name: Name) {
+ debug!("visit_name: st={:?}", self.st);
SawIdent(name.as_str()).hash(self.st);
}
fn visit_lifetime(&mut self, l: &'a Lifetime) {
+ debug!("visit_lifetime: st={:?}", self.st);
SawLifetime(l.name.as_str()).hash(self.st);
}
fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) {
+ debug!("visit_lifetime_def: st={:?}", self.st);
SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// that a change to a crate body will require downstream
// crates to be recompiled.
fn visit_expr(&mut self, ex: &'a Expr) {
+ debug!("visit_expr: st={:?}", self.st);
SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
}
fn visit_stmt(&mut self, s: &'a Stmt) {
+ debug!("visit_stmt: st={:?}", self.st);
SawStmt(saw_stmt(&s.node)).hash(self.st); visit::walk_stmt(self, s)
}
fn visit_foreign_item(&mut self, i: &'a ForeignItem) {
+ debug!("visit_foreign_item: st={:?}", self.st);
+
// FIXME (#14132) ideally we would incorporate privacy (or
// perhaps reachability) somewhere here, so foreign items
// that do not leak into downstream crates would not be
fn visit_item(&mut self, i: &'a Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
+
// FIXME (#14132) ideally would incorporate reachability
// analysis somewhere here, so items that never leak into
// downstream crates (e.g. via monomorphisation or
SawItem.hash(self.st); visit::walk_item(self, i)
}
- fn visit_mod(&mut self, m: &'a Mod, _s: Span, _n: NodeId) {
- SawMod.hash(self.st); visit::walk_mod(self, m)
+ fn visit_mod(&mut self, m: &'a Mod, _s: Span, n: NodeId) {
+ debug!("visit_mod: st={:?}", self.st);
+ SawMod.hash(self.st); visit::walk_mod(self, m, n)
}
fn visit_decl(&mut self, d: &'a Decl) {
+ debug!("visit_decl: st={:?}", self.st);
SawDecl.hash(self.st); visit::walk_decl(self, d)
}
fn visit_ty(&mut self, t: &'a Ty) {
+ debug!("visit_ty: st={:?}", self.st);
SawTy.hash(self.st); visit::walk_ty(self, t)
}
fn visit_generics(&mut self, g: &'a Generics) {
+ debug!("visit_generics: st={:?}", self.st);
SawGenerics.hash(self.st); visit::walk_generics(self, g)
}
fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl,
- b: &'a Block, s: Span, _: NodeId) {
- SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s)
+ b: &'a Block, s: Span, n: NodeId) {
+ debug!("visit_fn: st={:?}", self.st);
+ SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n)
}
fn visit_trait_item(&mut self, ti: &'a TraitItem) {
+ debug!("visit_trait_item: st={:?}", self.st);
SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
}
fn visit_impl_item(&mut self, ii: &'a ImplItem) {
+ debug!("visit_impl_item: st={:?}", self.st);
SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
}
fn visit_struct_field(&mut self, s: &'a StructField) {
+ debug!("visit_struct_field: st={:?}", self.st);
SawStructField.hash(self.st); visit::walk_struct_field(self, s)
}
fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) {
+ debug!("visit_path: st={:?}", self.st);
SawPath.hash(self.st); visit::walk_path(self, path)
}
fn visit_block(&mut self, b: &'a Block) {
+ debug!("visit_block: st={:?}", self.st);
SawBlock.hash(self.st); visit::walk_block(self, b)
}
fn visit_pat(&mut self, p: &'a Pat) {
+ debug!("visit_pat: st={:?}", self.st);
SawPat.hash(self.st); visit::walk_pat(self, p)
}
fn visit_local(&mut self, l: &'a Local) {
+ debug!("visit_local: st={:?}", self.st);
SawLocal.hash(self.st); visit::walk_local(self, l)
}
fn visit_arm(&mut self, a: &'a Arm) {
+ debug!("visit_arm: st={:?}", self.st);
SawArm.hash(self.st); visit::walk_arm(self, a)
}
}
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![cfg_attr(not(stage0), deny(warnings))]
+#![feature(question_mark)]
#![feature(rustc_private)]
#![feature(staged_api)]
pub use calculate_svh::SvhCalculate;
pub use persist::load_dep_graph;
pub use persist::save_dep_graph;
+pub use persist::save_trans_partition;
+pub use persist::save_work_products;
+pub use persist::in_incr_comp_dir;
//! The data that we will serialize and deserialize.
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId};
use rustc::hir::def_id::DefIndex;
+use std::sync::Arc;
use super::directory::DefPathIndex;
pub hash: u64,
}
+/// On-disk form of a single work-product: the `WorkProduct` data paired
+/// with the id of the dep-graph node that produced it.
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct SerializedWorkProduct {
+ /// node that produced the work-product
+ pub id: Arc<WorkProductId>,
+
+ /// work-product data itself
+ pub work_product: WorkProduct,
+}
+
/// Data for use when downstream crates get recompiled.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedMetadataHashes {
use rbml::opaque::Decoder;
use rustc::dep_graph::DepNode;
use rustc::hir::def_id::DefId;
+use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fnv::FnvHashSet;
use rustc_serialize::Decodable as RustcDecodable;
use std::io::Read;
-use std::fs::File;
-use std::path::Path;
+use std::fs::{self, File};
+use std::path::{Path};
use super::data::*;
use super::directory::*;
/// actually it doesn't matter all that much.) See `README.md` for
/// more general overview.
pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+ if tcx.sess.opts.incremental.is_none() {
+ return;
+ }
+
let _ignore = tcx.dep_graph.in_ignore();
+ load_dep_graph_if_exists(tcx);
+ dirty_clean::check_dirty_clean_annotations(tcx);
+}
- if let Some(dep_graph) = dep_graph_path(tcx) {
- // FIXME(#32754) lock file?
- load_dep_graph_if_exists(tcx, &dep_graph);
- dirty_clean::check_dirty_clean_annotations(tcx);
+/// Loads the serialized dep-graph and work-products files and feeds both
+/// to `decode_dep_graph`. Returns silently if either file is absent; a
+/// decoding error is surfaced via `sess.warn` rather than aborting.
+fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+ // unwrap(): the caller (`load_dep_graph`) returns early when
+ // `sess.opts.incremental` is `None`, so paths are available here.
+ let dep_graph_path = dep_graph_path(tcx).unwrap();
+ let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) {
+ Some(p) => p,
+ None => return // no file
+ };
+
+ let work_products_path = tcx_work_products_path(tcx).unwrap();
+ let work_products_data = match load_data(tcx.sess, &work_products_path) {
+ Some(p) => p,
+ None => return // no file
+ };
+
+ match decode_dep_graph(tcx, &dep_graph_data, &work_products_data) {
+ Ok(()) => return,
+ Err(err) => {
+ tcx.sess.warn(
+ &format!("decoding error in dep-graph from `{}` and `{}`: {}",
+ dep_graph_path.display(),
+ work_products_path.display(),
+ err));
+ }
}
}
-pub fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, path: &Path) {
+fn load_data(sess: &Session, path: &Path) -> Option<Vec<u8>> {
if !path.exists() {
- return;
+ return None;
}
let mut data = vec![];
File::open(path)
.and_then(|mut file| file.read_to_end(&mut data))
{
- Ok(_) => { }
+ Ok(_) => {
+ Some(data)
+ }
Err(err) => {
- tcx.sess.err(
+ sess.err(
&format!("could not load dep-graph from `{}`: {}",
path.display(), err));
- return;
+ None
}
}
- match decode_dep_graph(tcx, &data) {
- Ok(dirty) => dirty,
- Err(err) => {
- bug!("decoding error in dep-graph from `{}`: {}", path.display(), err);
- }
- }
}
+/// Decode the dep graph and load the edges/nodes that are still clean
+/// into `tcx.dep_graph`.
pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- data: &[u8])
+ dep_graph_data: &[u8],
+ work_products_data: &[u8])
-> Result<(), Error>
{
// Deserialize the directory and dep-graph.
- let mut decoder = Decoder::new(data, 0);
- let directory = try!(DefIdDirectory::decode(&mut decoder));
- let serialized_dep_graph = try!(SerializedDepGraph::decode(&mut decoder));
+ let mut dep_graph_decoder = Decoder::new(dep_graph_data, 0);
+ let directory = try!(DefIdDirectory::decode(&mut dep_graph_decoder));
+ let serialized_dep_graph = try!(SerializedDepGraph::decode(&mut dep_graph_decoder));
debug!("decode_dep_graph: directory = {:#?}", directory);
debug!("decode_dep_graph: serialized_dep_graph = {:#?}", serialized_dep_graph);
// Add nodes and edges that are not dirty into our main graph.
let dep_graph = tcx.dep_graph.clone();
for (source, target) in clean_edges.into_iter().chain(clean_nodes) {
- let _task = dep_graph.in_task(target.clone());
- dep_graph.read(source.clone());
-
debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source, target);
+
+ let _task = dep_graph.in_task(target);
+ dep_graph.read(source);
}
+ // Add in work-products that are still clean, and delete those that are
+ // dirty.
+ let mut work_product_decoder = Decoder::new(work_products_data, 0);
+ let work_products = try!(<Vec<SerializedWorkProduct>>::decode(&mut work_product_decoder));
+ reconcile_work_products(tcx, work_products, &dirty_nodes);
+
Ok(())
}
match hash.node.map_def(|&i| retraced.def_id(i)) {
Some(dep_node) => {
let current_hash = hcx.hash(&dep_node).unwrap();
- debug!("initial_dirty_nodes: hash of {:?} is {:?}, was {:?}",
- dep_node, current_hash, hash.hash);
if current_hash != hash.hash {
+ debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
+ dep_node, current_hash, hash.hash);
dirty_nodes.insert(dep_node);
}
}
clean_edges.push((source, target))
} else {
// source removed, target must be dirty
+ debug!("compute_clean_edges: {:?} dirty because {:?} no longer exists",
+ target, serialized_source);
dirty_nodes.insert(target);
}
} else {
clean_edges
}
+
+/// Go through the list of work-products produced in the previous run.
+/// Delete any whose nodes have been found to be dirty or which are
+/// otherwise no longer applicable.
+fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ work_products: Vec<SerializedWorkProduct>,
+ dirty_nodes: &DirtyNodes) {
+ debug!("reconcile_work_products({:?})", work_products);
+ for swp in work_products {
+ // A work-product is reusable only if its dep-node is still clean...
+ let dep_node = DepNode::WorkProduct(swp.id.clone());
+ if dirty_nodes.contains(&dep_node) {
+ debug!("reconcile_work_products: dep-node for {:?} is dirty", swp);
+ delete_dirty_work_product(tcx, swp);
+ } else {
+ // ...and every file it saved is still present on disk.
+ let all_files_exist =
+ swp.work_product
+ .saved_files
+ .iter()
+ .all(|&(_, ref file_name)| {
+ let path = in_incr_comp_dir(tcx.sess, &file_name).unwrap();
+ path.exists()
+ });
+ if all_files_exist {
+ debug!("reconcile_work_products: all files for {:?} exist", swp);
+ tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
+ } else {
+ debug!("reconcile_work_products: some file for {:?} does not exist", swp);
+ delete_dirty_work_product(tcx, swp);
+ }
+ }
+ }
+}
+
+/// Removes the files recorded in `swp` from the incremental directory.
+/// Deletion failures are reported as warnings, not hard errors.
+fn delete_dirty_work_product(tcx: TyCtxt,
+ swp: SerializedWorkProduct) {
+ debug!("delete_dirty_work_product({:?})", swp);
+ for &(_, ref file_name) in &swp.work_product.saved_files {
+ let path = in_incr_comp_dir(tcx.sess, file_name).unwrap();
+ match fs::remove_file(&path) {
+ Ok(()) => { }
+ Err(err) => {
+ tcx.sess.warn(
+ &format!("file-system error deleting outdated file `{}`: {}",
+ path.display(), err));
+ }
+ }
+ }
+}
mod load;
mod save;
mod util;
+mod work_product;
pub use self::load::load_dep_graph;
pub use self::save::save_dep_graph;
+pub use self::save::save_work_products;
+pub use self::work_product::save_trans_partition;
+pub use self::util::in_incr_comp_dir;
use rbml::opaque::Encoder;
use rustc::dep_graph::DepNode;
use rustc::middle::cstore::LOCAL_CRATE;
+use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_serialize::{Encodable as RustcEncodable};
use std::hash::{Hasher, SipHasher};
use super::util::*;
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+ debug!("save_dep_graph()");
let _ignore = tcx.dep_graph.in_ignore();
+ let sess = tcx.sess;
let mut hcx = HashContext::new(tcx);
- save_in(&mut hcx, dep_graph_path(tcx), encode_dep_graph);
- save_in(&mut hcx, metadata_hash_path(tcx, LOCAL_CRATE), encode_metadata_hashes);
+ save_in(sess, dep_graph_path(tcx), |e| encode_dep_graph(&mut hcx, e));
+ save_in(sess, metadata_hash_path(tcx, LOCAL_CRATE), |e| encode_metadata_hashes(&mut hcx, e));
}
-fn save_in<'a, 'tcx, F>(hcx: &mut HashContext<'a, 'tcx>,
- opt_path_buf: Option<PathBuf>,
- encode: F)
- where F: FnOnce(&mut HashContext<'a, 'tcx>, &mut Encoder) -> io::Result<()>
-{
- let tcx = hcx.tcx;
+/// Serializes the dep-graph's current set of work-products into the
+/// session's work-products file (see `sess_work_products_path`).
+pub fn save_work_products(sess: &Session, local_crate_name: &str) {
+ debug!("save_work_products()");
+ // Suppress dep-graph tracking while writing the file itself.
+ let _ignore = sess.dep_graph.in_ignore();
+ let path = sess_work_products_path(sess, local_crate_name);
+ save_in(sess, path, |e| encode_work_products(sess, e));
+}
+fn save_in<F>(sess: &Session,
+ opt_path_buf: Option<PathBuf>,
+ encode: F)
+ where F: FnOnce(&mut Encoder) -> io::Result<()>
+{
let path_buf = match opt_path_buf {
Some(p) => p,
None => return
match fs::remove_file(&path_buf) {
Ok(()) => { }
Err(err) => {
- tcx.sess.err(
+ sess.err(
&format!("unable to delete old dep-graph at `{}`: {}",
path_buf.display(), err));
return;
// generate the data in a memory buffer
let mut wr = Cursor::new(Vec::new());
- match encode(hcx, &mut Encoder::new(&mut wr)) {
+ match encode(&mut Encoder::new(&mut wr)) {
Ok(()) => { }
Err(err) => {
- tcx.sess.err(
+ sess.err(
&format!("could not encode dep-graph to `{}`: {}",
path_buf.display(), err));
return;
{
Ok(_) => { }
Err(err) => {
- tcx.sess.err(
+ sess.err(
&format!("failed to write dep-graph to `{}`: {}",
path_buf.display(), err));
return;
Ok(())
}
+
+/// Encodes the dep-graph's work-product map as a
+/// `Vec<SerializedWorkProduct>` via the given encoder.
+pub fn encode_work_products(sess: &Session,
+ encoder: &mut Encoder)
+ -> io::Result<()>
+{
+ let work_products: Vec<_> =
+ sess.dep_graph.work_products()
+ .iter()
+ .map(|(id, work_product)| {
+ SerializedWorkProduct {
+ id: id.clone(),
+ work_product: work_product.clone(),
+ }
+ })
+ .collect();
+
+ work_products.encode(encoder)
+}
+
// except according to those terms.
use rustc::middle::cstore::LOCAL_CRATE;
+use rustc::session::Session;
use rustc::ty::TyCtxt;
use std::fs;
use syntax::ast;
pub fn dep_graph_path(tcx: TyCtxt) -> Option<PathBuf> {
- path(tcx, LOCAL_CRATE, "local")
+ tcx_path(tcx, LOCAL_CRATE, "local")
}
pub fn metadata_hash_path(tcx: TyCtxt, cnum: ast::CrateNum) -> Option<PathBuf> {
- path(tcx, cnum, "metadata")
+ tcx_path(tcx, cnum, "metadata")
}
-fn path(tcx: TyCtxt, cnum: ast::CrateNum, suffix: &str) -> Option<PathBuf> {
+/// Work-products file path for the local crate, with the crate name
+/// looked up through the `TyCtxt`.
+pub fn tcx_work_products_path(tcx: TyCtxt) -> Option<PathBuf> {
+ let crate_name = tcx.crate_name(LOCAL_CRATE);
+ sess_work_products_path(tcx.sess, &crate_name)
+}
+
+/// Work-products file path computed from just a `Session` and the local
+/// crate name, for callers that do not have a `TyCtxt`.
+pub fn sess_work_products_path(sess: &Session,
+ local_crate_name: &str)
+ -> Option<PathBuf> {
+ let crate_disambiguator = sess.local_crate_disambiguator();
+ path(sess, local_crate_name, &crate_disambiguator, "work-products")
+}
+
+/// Joins `file_name` onto the incremental-compilation directory; returns
+/// `None` when no such directory is configured (`sess.opts.incremental`).
+pub fn in_incr_comp_dir(sess: &Session, file_name: &str) -> Option<PathBuf> {
+ sess.opts.incremental.as_ref().map(|incr_dir| incr_dir.join(file_name))
+}
+
+/// Like `path`, but derives the crate name and disambiguator for `cnum`
+/// from the `TyCtxt`.
+fn tcx_path(tcx: TyCtxt,
+ cnum: ast::CrateNum,
+ middle: &str)
+ -> Option<PathBuf> {
+ path(tcx.sess, &tcx.crate_name(cnum), &tcx.crate_disambiguator(cnum), middle)
+}
+
+fn path(sess: &Session,
+ crate_name: &str,
+ crate_disambiguator: &str,
+ middle: &str)
+ -> Option<PathBuf> {
// For now, just save/load dep-graph from
// directory/dep_graph.rbml
- tcx.sess.opts.incremental.as_ref().and_then(|incr_dir| {
+ sess.opts.incremental.as_ref().and_then(|incr_dir| {
match create_dir_racy(&incr_dir) {
Ok(()) => {}
Err(err) => {
- tcx.sess.err(
+ sess.err(
&format!("could not create the directory `{}`: {}",
incr_dir.display(), err));
return None;
}
}
- let crate_name = tcx.crate_name(cnum);
- let crate_disambiguator = tcx.crate_disambiguator(cnum);
- let file_name = format!("{}-{}.{}.bin",
- crate_name,
- crate_disambiguator,
- suffix);
+ let file_name = format!("{}-{}.{}.bin", crate_name, crate_disambiguator, middle);
+
Some(incr_dir.join(file_name))
})
}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module contains code for saving intermediate work-products.
+
+use persist::util::*;
+use rustc::dep_graph::{WorkProduct, WorkProductId};
+use rustc::session::Session;
+use rustc::session::config::OutputType;
+use rustc::util::fs::link_or_copy;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+/// Copies (via `link_or_copy`) the output files of codegen unit `cgu_name`
+/// into the incremental directory and registers a `WorkProduct` for them
+/// in the dep-graph. No-op when incremental compilation is disabled.
+/// If any single file fails to copy, the entire work-product is abandoned
+/// (a warning is emitted and nothing is recorded).
+pub fn save_trans_partition(sess: &Session,
+ cgu_name: &str,
+ partition_hash: u64,
+ files: &[(OutputType, PathBuf)]) {
+ debug!("save_trans_partition({:?},{},{:?})",
+ cgu_name,
+ partition_hash,
+ files);
+ if sess.opts.incremental.is_none() {
+ return;
+ }
+ let work_product_id = Arc::new(WorkProductId(cgu_name.to_string()));
+
+ // Collecting `Option`s into `Option<Vec<_>>` yields `None` as soon as
+ // any element is `None`, i.e. if any copy failed.
+ let saved_files: Option<Vec<_>> =
+ files.iter()
+ .map(|&(kind, ref path)| {
+ let file_name = format!("cgu-{}.{}", cgu_name, kind.extension());
+ let path_in_incr_dir = in_incr_comp_dir(sess, &file_name).unwrap();
+ match link_or_copy(path, &path_in_incr_dir) {
+ Ok(_) => Some((kind, file_name)),
+ Err(err) => {
+ sess.warn(&format!("error copying object file `{}` \
+ to incremental directory as `{}`: {}",
+ path.display(),
+ path_in_incr_dir.display(),
+ err));
+ None
+ }
+ }
+ })
+ .collect();
+ let saved_files = match saved_files {
+ Some(v) => v,
+ None => return,
+ };
+
+ let work_product = WorkProduct {
+ input_hash: partition_hash,
+ saved_files: saved_files,
+ };
+
+ sess.dep_graph.insert_work_product(&work_product_id, work_product);
+}
use std::collections::HashSet;
use syntax::{ast};
-use syntax::attr::{self, AttrMetaMethods};
+use syntax::attr::{self, AttrMetaMethods, AttributeMethods};
use syntax_pos::{self, Span};
use rustc::hir::{self, PatKind};
}
}
- let has_doc = attrs.iter().any(|a| {
- match a.node.value.node {
- ast::MetaItemKind::NameValue(ref name, _) if *name == "doc" => true,
- _ => false
- }
- });
+ let has_doc = attrs.iter().any(|a| a.is_value_str() && a.name() == "doc");
if !has_doc {
cx.span_lint(MISSING_DOCS, sp,
&format!("missing documentation for {}", desc));
impl LateLintPass for UnstableFeatures {
fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) {
- if attr::contains_name(&[attr.node.value.clone()], "feature") {
- if let Some(items) = attr.node.value.meta_item_list() {
+ if attr::contains_name(&[attr.meta().clone()], "feature") {
+ if let Some(items) = attr.meta().meta_item_list() {
for item in items {
- ctx.span_lint(UNSTABLE_FEATURES, item.span, "unstable feature");
+ ctx.span_lint(UNSTABLE_FEATURES, item.span(), "unstable feature");
}
}
}
[build-dependencies]
build_helper = { path = "../build_helper" }
-gcc = "0.3"
+gcc = "0.3.27"
use std::process::Command;
use std::env;
-use std::path::PathBuf;
+use std::path::{PathBuf, Path};
use build_helper::output;
&lib[2..]
} else if lib.starts_with("-") {
&lib[1..]
+ } else if Path::new(lib).exists() {
+ // On MSVC llvm-config will print the full name to libraries, but
+ // we're only interested in the name part
+ let name = Path::new(lib).file_name().unwrap().to_str().unwrap();
+ name.trim_right_matches(".lib")
+ } else if lib.ends_with(".lib") {
+ // Some MSVC libraries just come up with `.lib` tacked on, so chop
+ // that off
+ lib.trim_right_matches(".lib")
} else {
- continue;
+ continue
};
// Don't need or want this library, but LLVM's CMake build system
// library and it otherwise may just pull in extra dependencies on
// libedit which we don't want
if name == "LLVMLineEditor" {
- continue;
+ continue
}
let kind = if name.starts_with("LLVM") {
let mut cmd = Command::new(&llvm_config);
cmd.arg("--ldflags");
for lib in output(&mut cmd).split_whitespace() {
- if is_crossed {
+ if lib.starts_with("-LIBPATH:") {
+ println!("cargo:rustc-link-search=native={}", &lib[9..]);
+ } else if is_crossed {
if lib.starts_with("-L") {
println!("cargo:rustc-link-search=native={}",
lib[2..].replace(&host, &target));
pub fn apply_callsite(&self, idx: usize, callsite: ValueRef) {
unsafe {
- LLVMAddCallSiteAttribute(callsite, idx as c_uint, self.regular.bits());
+ LLVMRustAddCallSiteAttribute(callsite, idx as c_uint, self.regular.bits());
if self.dereferenceable_bytes != 0 {
LLVMAddDereferenceableCallSiteAttr(callsite, idx as c_uint,
self.dereferenceable_bytes);
pub fn LLVMSetInstrParamAlignment(Instr: ValueRef,
index: c_uint,
align: c_uint);
- pub fn LLVMAddCallSiteAttribute(Instr: ValueRef,
+ pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef,
index: c_uint,
Val: uint64_t);
pub fn LLVMAddDereferenceableCallSiteAttr(Instr: ValueRef,
Alignment: c_uint)
-> ValueRef;
- pub fn LLVMBuildAtomicCmpXchg(B: BuilderRef,
+ pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef,
LHS: ValueRef,
CMP: ValueRef,
RHS: ValueRef,
/// Creates target data from a target layout string.
pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef;
- /// Adds the target data to the given pass manager. The pass manager
- /// references the target data only weakly.
- pub fn LLVMAddTargetData(TD: TargetDataRef, PM: PassManagerRef);
/// Number of bytes clobbered when doing a Store to *T.
pub fn LLVMStoreSizeOfType(TD: TargetDataRef, Ty: TypeRef)
-> c_ulonglong;
pub fn LLVMRustSetComdat(M: ModuleRef, V: ValueRef, Name: *const c_char);
pub fn LLVMRustUnsetComdat(V: ValueRef);
+ pub fn LLVMRustSetModulePIELevel(M: ModuleRef);
}
// LLVM requires symbols from this library, but apparently they're not printed
use rustc::hir;
use rustc::hir::fold;
use rustc::hir::fold::Folder;
-use rustc::hir::intravisit::{IdRange, IdRangeComputingVisitor, IdVisitingOperation};
+use rustc::hir::intravisit::{Visitor, IdRangeComputingVisitor, IdRange};
use common as c;
use cstore;
rbml_w.writer.seek(SeekFrom::Current(0)));
// Folding could be avoided with a smarter encoder.
- let ii = simplify_ast(ii);
+ let (ii, expected_id_range) = simplify_ast(ii);
let id_range = inlined_item_id_range(&ii);
+ assert_eq!(expected_id_range, id_range);
rbml_w.start_tag(c::tag_ast as usize);
id_range.encode(rbml_w);
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
// from_id_range should be non-empty
assert!(!self.from_id_range.empty());
+ // Make sure that translating the NodeId will actually yield a
+ // meaningful result
+ assert!(self.from_id_range.contains(id));
+
// Use wrapping arithmetic because otherwise it introduces control flow.
// Maybe we should just have the control flow? -- aatch
(id.wrapping_sub(self.from_id_range.min).wrapping_add(self.to_id_range.min))
rbml_w.end_tag();
}
-struct NestedItemsDropper;
+struct NestedItemsDropper {
+ id_range: IdRange
+}
impl Folder for NestedItemsDropper {
+
+ // The unit tests below run on HIR with NodeIds not properly assigned. That
+ // causes an integer overflow. So we just don't track the id_range when
+ // building the unit tests.
+ #[cfg(not(test))]
+ fn new_id(&mut self, id: ast::NodeId) -> ast::NodeId {
+ // Record the range of NodeIds we are visiting, so we can do a sanity
+ // check later
+ self.id_range.add(id);
+ id
+ }
+
fn fold_block(&mut self, blk: P<hir::Block>) -> P<hir::Block> {
blk.and_then(|hir::Block {id, stmts, expr, rules, span, ..}| {
let stmts_sans_items = stmts.into_iter().filter_map(|stmt| {
// As it happens, trans relies on the fact that we do not export
// nested items, as otherwise it would get confused when translating
// inlined items.
-fn simplify_ast(ii: InlinedItemRef) -> InlinedItem {
- let mut fld = NestedItemsDropper;
+fn simplify_ast(ii: InlinedItemRef) -> (InlinedItem, IdRange) {
+ let mut fld = NestedItemsDropper {
+ id_range: IdRange::max()
+ };
- match ii {
+ let ii = match ii {
// HACK we're not dropping items.
InlinedItemRef::Item(i) => {
InlinedItem::Item(P(fold::noop_fold_item(i.clone(), &mut fld)))
InlinedItemRef::Foreign(i) => {
InlinedItem::Foreign(P(fold::noop_fold_foreign_item(i.clone(), &mut fld)))
}
- }
+ };
+
+ (ii, fld.id_range)
}
fn decode_ast(item_doc: rbml::Doc) -> InlinedItem {
match *self {
Def::Fn(did) => Def::Fn(did.tr(dcx)),
Def::Method(did) => Def::Method(did.tr(dcx)),
- Def::SelfTy(opt_did, impl_id) => { Def::SelfTy(opt_did.map(|did| did.tr(dcx)),
- impl_id.map(|id| dcx.tr_id(id))) }
+ Def::SelfTy(opt_did, impl_id) => {
+ // Since the impl_id will never lie within the reserved range of
+ // imported NodeIds, it does not make sense to translate it.
+ // The result would not make any sense within the importing crate.
+ // We also don't allow for impl items to be inlined (just their
+ // members), so even if we had a DefId here, we wouldn't be able
+ // to do much with it.
+ // So, we set the id to DUMMY_NODE_ID. That way we make it
+ // explicit that this is no usable NodeId.
+ Def::SelfTy(opt_did.map(|did| did.tr(dcx)),
+ impl_id.map(|_| ast::DUMMY_NODE_ID))
+ }
Def::Mod(did) => { Def::Mod(did.tr(dcx)) }
Def::ForeignMod(did) => { Def::ForeignMod(did.tr(dcx)) }
Def::Static(did, m) => { Def::Static(did.tr(dcx), m) }
rbml_w: &'a mut Encoder<'b>,
}
-impl<'a, 'b, 'c, 'tcx> IdVisitingOperation for
+impl<'a, 'b, 'c, 'tcx, 'v> Visitor<'v> for
SideTableEncodingIdVisitor<'a, 'b, 'c, 'tcx> {
fn visit_id(&mut self, id: ast::NodeId) {
encode_side_tables_for_id(self.ecx, self.rbml_w, id)
rbml_w: &mut Encoder,
ii: &InlinedItem) {
rbml_w.start_tag(c::tag_table as usize);
- ii.visit_ids(&mut SideTableEncodingIdVisitor {
+ ii.visit(&mut SideTableEncodingIdVisitor {
ecx: ecx,
rbml_w: rbml_w
});
}
}
-fn inlined_item_id_range(v: &InlinedItem) -> IdRange {
+fn inlined_item_id_range(ii: &InlinedItem) -> IdRange {
let mut visitor = IdRangeComputingVisitor::new();
- v.visit_ids(&mut visitor);
+ ii.visit(&mut visitor);
visitor.result()
}
with_testing_context(|lcx| {
let hir_item = lcx.lower_item(&item);
let item_in = InlinedItemRef::Item(&hir_item);
- let item_out = simplify_ast(item_in);
+ let (item_out, _) = simplify_ast(item_in);
let item_exp = InlinedItem::Item(P(lcx.lower_item("e_item!(&cx,
fn new_int_alist<B>() -> alist<isize, B> {
return alist {eq_fn: eq_int, data: Vec::new()};
// Check for (potential) conflicts with the local crate
if self.local_crate_name == crate_name &&
- self.sess.crate_disambiguator.get().as_str() == disambiguator {
+ self.sess.local_crate_disambiguator() == disambiguator {
span_fatal!(self.sess, span, E0519,
"the current crate is indistinguishable from one of its \
dependencies: it has the same crate-name `{}` and was \
use encoder;
use loader;
-use middle::cstore::{CrateStore, CrateSource, ChildItem, ExternCrate, FoundAst, DefLike};
+use middle::cstore::{InlinedItem, CrateStore, CrateSource, ChildItem, ExternCrate, DefLike};
use middle::cstore::{NativeLibraryKind, LinkMeta, LinkagePreference};
use rustc::hir::def;
use middle::lang_items;
result
}
- fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
- -> FoundAst<'tcx>
+ fn maybe_get_item_ast<'a>(&'tcx self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> Option<(&'tcx InlinedItem, ast::NodeId)>
{
- self.dep_graph.read(DepNode::MetaData(def));
- let cdata = self.get_crate_data(def.krate);
- decoder::maybe_get_item_ast(&cdata, tcx, def.index)
+ self.dep_graph.read(DepNode::MetaData(def_id));
+
+ match self.inlined_item_cache.borrow().get(&def_id) {
+ Some(&None) => {
+ return None; // Not inlinable
+ }
+ Some(&Some(ref cached_inlined_item)) => {
+ // Already inlined into this crate
+ debug!("maybe_get_item_ast({}): already inline as node id {}",
+ tcx.item_path_str(def_id), cached_inlined_item.item_id);
+ return Some((tcx.map.expect_inlined_item(cached_inlined_item.inlined_root),
+ cached_inlined_item.item_id));
+ }
+ None => {
+ // Not seen yet
+ }
+ }
+
+ debug!("maybe_get_item_ast({}): inlining item", tcx.item_path_str(def_id));
+
+ let cdata = self.get_crate_data(def_id.krate);
+ let inlined = decoder::maybe_get_item_ast(&cdata, tcx, def_id.index);
+
+ let cache_inlined_item = |original_def_id, inlined_item_id, inlined_root_node_id| {
+ let cache_entry = cstore::CachedInlinedItem {
+ inlined_root: inlined_root_node_id,
+ item_id: inlined_item_id,
+ };
+ self.inlined_item_cache
+ .borrow_mut()
+ .insert(original_def_id, Some(cache_entry));
+ self.defid_for_inlined_node
+ .borrow_mut()
+ .insert(inlined_item_id, original_def_id);
+ };
+
+ let find_inlined_item_root = |inlined_item_id| {
+ let mut node = inlined_item_id;
+ let mut path = Vec::with_capacity(10);
+
+ // If we can't find the inline root after a thousand hops, we can
+ // be pretty sure there's something wrong with the HIR map.
+ for _ in 0 .. 1000 {
+ path.push(node);
+ let parent_node = tcx.map.get_parent_node(node);
+ if parent_node == node {
+ return node;
+ }
+ node = parent_node;
+ }
+ bug!("cycle in HIR map parent chain")
+ };
+
+ match inlined {
+ decoder::FoundAst::NotFound => {
+ self.inlined_item_cache
+ .borrow_mut()
+ .insert(def_id, None);
+ }
+ decoder::FoundAst::Found(&InlinedItem::Item(ref item)) => {
+ let inlined_root_node_id = find_inlined_item_root(item.id);
+ cache_inlined_item(def_id, item.id, inlined_root_node_id);
+ }
+ decoder::FoundAst::Found(&InlinedItem::Foreign(ref item)) => {
+ let inlined_root_node_id = find_inlined_item_root(item.id);
+ cache_inlined_item(def_id, item.id, inlined_root_node_id);
+ }
+ decoder::FoundAst::FoundParent(parent_did, item) => {
+ let inlined_root_node_id = find_inlined_item_root(item.id);
+ cache_inlined_item(parent_did, item.id, inlined_root_node_id);
+
+ match item.node {
+ hir::ItemEnum(ref ast_def, _) => {
+ let ast_vs = &ast_def.variants;
+ let ty_vs = &tcx.lookup_adt_def(parent_did).variants;
+ assert_eq!(ast_vs.len(), ty_vs.len());
+ for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) {
+ cache_inlined_item(ty_v.did,
+ ast_v.node.data.id(),
+ inlined_root_node_id);
+ }
+ }
+ hir::ItemStruct(ref struct_def, _) => {
+ if struct_def.is_struct() {
+ bug!("instantiate_inline: called on a non-tuple struct")
+ } else {
+ cache_inlined_item(def_id,
+ struct_def.id(),
+ inlined_root_node_id);
+ }
+ }
+ _ => bug!("instantiate_inline: item has a \
+ non-enum, non-struct parent")
+ }
+ }
+ decoder::FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => {
+ let inlined_root_node_id = find_inlined_item_root(trait_item.id);
+ cache_inlined_item(def_id, trait_item.id, inlined_root_node_id);
+
+ // Associated consts already have to be evaluated in `typeck`, so
+ // the logic to do that already exists in `middle`. In order to
+ // reuse that code, it needs to be able to look up the traits for
+ // inlined items.
+ let ty_trait_item = tcx.impl_or_trait_item(def_id).clone();
+ let trait_item_def_id = tcx.map.local_def_id(trait_item.id);
+ tcx.impl_or_trait_items.borrow_mut()
+ .insert(trait_item_def_id, ty_trait_item);
+ }
+ decoder::FoundAst::Found(&InlinedItem::ImplItem(_, ref impl_item)) => {
+ let inlined_root_node_id = find_inlined_item_root(impl_item.id);
+ cache_inlined_item(def_id, impl_item.id, inlined_root_node_id);
+ }
+ }
+
+ // We can be sure to hit the cache now
+ return self.maybe_get_item_ast(tcx, def_id);
+ }
+
+ fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option<ast::NodeId> {
+ assert!(!def_id.is_local());
+ match self.inlined_item_cache.borrow().get(&def_id) {
+ Some(&Some(ref cached_inlined_item)) => {
+ Some(cached_inlined_item.item_id)
+ }
+ Some(&None) => {
+ None
+ }
+ _ => {
+ bug!("Trying to lookup inlined NodeId for unexpected item");
+ }
+ }
+ }
+
+ fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option<DefId> {
+ self.defid_for_inlined_node.borrow().get(&node_id).map(|x| *x)
}
fn maybe_get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
visible_parent_map
}
}
+
use rustc::hir::def_id::{DefIndex, DefId};
use rustc::hir::map::DefKey;
use rustc::hir::svh::Svh;
-use rustc::middle::cstore::{ExternCrate};
+use rustc::middle::cstore::ExternCrate;
use rustc::session::config::PanicStrategy;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap};
pub explicitly_linked: Cell<bool>,
}
+pub struct CachedInlinedItem {
+ /// The NodeId of the RootInlinedParent HIR map entry
+ pub inlined_root: ast::NodeId,
+ /// The local NodeId of the inlined entity
+ pub item_id: ast::NodeId,
+}
+
pub struct CStore {
pub dep_graph: DepGraph,
metas: RefCell<FnvHashMap<ast::CrateNum, Rc<CrateMetadata>>>,
used_libraries: RefCell<Vec<(String, NativeLibraryKind)>>,
used_link_args: RefCell<Vec<String>>,
statically_included_foreign_items: RefCell<NodeSet>,
+ pub inlined_item_cache: RefCell<DefIdMap<Option<CachedInlinedItem>>>,
+ pub defid_for_inlined_node: RefCell<NodeMap<DefId>>,
pub visible_parent_map: RefCell<DefIdMap<DefId>>,
}
used_link_args: RefCell::new(Vec::new()),
statically_included_foreign_items: RefCell::new(NodeSet()),
visible_parent_map: RefCell::new(FnvHashMap()),
+ inlined_item_cache: RefCell::new(FnvHashMap()),
+ defid_for_inlined_node: RefCell::new(FnvHashMap()),
}
}
use rustc::hir;
use rustc::session::config::PanicStrategy;
-use middle::cstore::{FoundAst, InlinedItem, LinkagePreference};
+use middle::cstore::{InlinedItem, LinkagePreference};
use middle::cstore::{DefLike, DlDef, DlField, DlImpl, tls};
use rustc::hir::def::Def;
use rustc::hir::def_id::{DefId, DefIndex};
use syntax::attr;
use syntax::parse::token;
use syntax::ast;
-use syntax::abi::Abi;
use syntax::codemap;
use syntax::print::pprust;
use syntax::ptr::P;
maybe_item_name(cdata.lookup_item(id))
}
+pub enum FoundAst<'ast> {
+ Found(&'ast InlinedItem),
+ FoundParent(DefId, &'ast hir::Item),
+ NotFound,
+}
+
pub fn maybe_get_item_ast<'a, 'tcx>(cdata: Cmd, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex)
-> FoundAst<'tcx> {
debug!("Looking up item: {:?}", id);
// an attribute
assert_eq!(meta_items.len(), 1);
let meta_item = meta_items.into_iter().nth(0).unwrap();
- codemap::Spanned {
- node: ast::Attribute_ {
- id: attr::mk_attr_id(),
- style: ast::AttrStyle::Outer,
- value: meta_item,
- is_sugared_doc: is_sugared_doc,
- },
- span: syntax_pos::DUMMY_SP
- }
+ attr::mk_doc_attr_outer(attr::mk_attr_id(), meta_item, is_sugared_doc)
}).collect()
},
None => vec![],
let applicable = match item_family(item_doc) {
ImmStatic | MutStatic => true,
Fn => {
- let ty::TypeScheme { generics, ty } = get_type(cdata, id, tcx);
+ let ty::TypeScheme { generics, .. } = get_type(cdata, id, tcx);
let no_generics = generics.types.is_empty();
- match ty.sty {
- ty::TyFnDef(_, _, fn_ty) | ty::TyFnPtr(fn_ty)
- if fn_ty.abi != Abi::Rust => return no_generics,
- _ => no_generics,
- }
+ no_generics
},
_ => false,
};
use std::u32;
use syntax::abi::Abi;
use syntax::ast::{self, NodeId, Name, CRATE_NODE_ID, CrateNum};
-use syntax::attr;
+use syntax::attr::{self,AttrMetaMethods,AttributeMethods};
use errors::Handler;
use syntax;
use syntax_pos::BytePos;
}
fn encode_meta_item(rbml_w: &mut Encoder, mi: &ast::MetaItem) {
- match mi.node {
- ast::MetaItemKind::Word(ref name) => {
+ if mi.is_word() {
+ let name = mi.name();
rbml_w.start_tag(tag_meta_item_word);
- rbml_w.wr_tagged_str(tag_meta_item_name, name);
+ rbml_w.wr_tagged_str(tag_meta_item_name, &name);
rbml_w.end_tag();
- }
- ast::MetaItemKind::NameValue(ref name, ref value) => {
- match value.node {
- ast::LitKind::Str(ref value, _) => {
- rbml_w.start_tag(tag_meta_item_name_value);
- rbml_w.wr_tagged_str(tag_meta_item_name, name);
- rbml_w.wr_tagged_str(tag_meta_item_value, value);
- rbml_w.end_tag();
- }
- _ => {/* FIXME (#623): encode other variants */ }
- }
- }
- ast::MetaItemKind::List(ref name, ref items) => {
+ } else if mi.is_value_str() {
+ let name = mi.name();
+ /* FIXME (#623): support other literal kinds */
+ let value = mi.value_str().unwrap();
+ rbml_w.start_tag(tag_meta_item_name_value);
+ rbml_w.wr_tagged_str(tag_meta_item_name, &name);
+ rbml_w.wr_tagged_str(tag_meta_item_value, &value);
+ rbml_w.end_tag();
+ } else { // it must be a list
+ let name = mi.name();
+ let items = mi.meta_item_list().unwrap();
rbml_w.start_tag(tag_meta_item_list);
- rbml_w.wr_tagged_str(tag_meta_item_name, name);
+ rbml_w.wr_tagged_str(tag_meta_item_name, &name);
for inner_item in items {
encode_meta_item(rbml_w, &inner_item);
}
rbml_w.end_tag();
- }
}
}
for attr in attrs {
rbml_w.start_tag(tag_attribute);
rbml_w.wr_tagged_u8(tag_attribute_is_sugared_doc, attr.node.is_sugared_doc as u8);
- encode_meta_item(rbml_w, &attr.node.value);
+ encode_meta_item(rbml_w, attr.meta());
rbml_w.end_tag();
}
rbml_w.end_tag();
encode_crate_name(rbml_w, &ecx.link_meta.crate_name);
encode_crate_triple(rbml_w, &ecx.tcx.sess.opts.target_triple);
encode_hash(rbml_w, &ecx.link_meta.crate_hash);
- encode_crate_disambiguator(rbml_w, &ecx.tcx.sess.crate_disambiguator.get().as_str());
+ encode_crate_disambiguator(rbml_w, &ecx.tcx.sess.local_crate_disambiguator());
encode_dylib_dependency_formats(rbml_w, &ecx);
encode_panic_strategy(rbml_w, &ecx);
}
if let (Some(sel), Some(names)) = (import.as_mut(), names) {
for attr in names {
- if let ast::MetaItemKind::Word(ref name) = attr.node {
- sel.insert(name.clone(), attr.span);
+ if attr.is_word() {
+ sel.insert(attr.name().clone(), attr.span());
} else {
- span_err!(self.sess, attr.span, E0466, "bad macro import");
+ span_err!(self.sess, attr.span(), E0466, "bad macro import");
}
}
}
};
for attr in names {
- if let ast::MetaItemKind::Word(ref name) = attr.node {
- reexport.insert(name.clone(), attr.span);
+ if attr.is_word() {
+ reexport.insert(attr.name().clone(), attr.span());
} else {
- call_bad_macro_reexport(self.sess, attr.span);
+ call_bad_macro_reexport(self.sess, attr.span());
}
}
}
build::construct_fn(cx, id, arguments, fn_sig.output, body)
});
- intravisit::walk_fn(self, fk, decl, body, span);
+ intravisit::walk_fn(self, fk, decl, body, span, id);
}
}
let qualif = self.with_mode(mode, |this| {
this.with_euv(Some(fn_id), |euv| euv.walk_fn(fd, b));
- intravisit::walk_fn(this, fk, fd, b, s);
+ intravisit::walk_fn(this, fk, fd, b, s, fn_id);
this.qualif
});
let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx);
euv.walk_fn(fd, b);
});
- intravisit::walk_fn(self, fk, fd, b, s)
+ intravisit::walk_fn(self, fk, fd, b, s, fn_id)
}
}
}
}
- intravisit::walk_mod(self, m);
+ intravisit::walk_mod(self, m, id);
}
fn visit_macro_def(&mut self, md: &'v hir::MacroDef) {
Import {
binding: &'a NameBinding<'a>,
directive: &'a ImportDirective<'a>,
- // Some(error) if using this imported name causes the import to be a privacy error
- privacy_error: Option<Box<PrivacyError<'a>>>,
},
}
self.used_crates.insert(krate);
}
- let (directive, privacy_error) = match binding.kind {
- NameBindingKind::Import { directive, ref privacy_error, .. } =>
- (directive, privacy_error),
+ let directive = match binding.kind {
+ NameBindingKind::Import { directive, .. } => directive,
_ => return,
};
- if let Some(error) = privacy_error.as_ref() {
- self.privacy_errors.push((**error).clone());
- }
-
if !self.make_glob_map {
return;
}
impl<'a> ImportDirective<'a> {
// Given the binding to which this directive resolves in a particular namespace,
// this returns the binding for the name this directive defines in that namespace.
- fn import(&'a self, binding: &'a NameBinding<'a>, privacy_error: Option<Box<PrivacyError<'a>>>)
- -> NameBinding<'a> {
+ fn import(&'a self, binding: &'a NameBinding<'a>) -> NameBinding<'a> {
NameBinding {
kind: NameBindingKind::Import {
binding: binding,
directive: self,
- privacy_error: privacy_error,
},
span: self.span,
vis: self.vis,
fn define_in_glob_importers(&self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>) {
if !binding.is_importable() || !binding.is_pseudo_public() { return }
for &(importer, directive) in self.glob_importers.borrow_mut().iter() {
- let _ = importer.try_define_child(name, ns, directive.import(binding, None));
+ let _ = importer.try_define_child(name, ns, directive.import(binding));
}
}
}
span: DUMMY_SP,
vis: ty::Visibility::Public,
});
- let dummy_binding = directive.import(dummy_binding, None);
+ let dummy_binding = directive.import(dummy_binding);
let _ = source_module.try_define_child(target, ValueNS, dummy_binding.clone());
let _ = source_module.try_define_child(target, TypeNS, dummy_binding);
self.resolver.resolve_name_in_module(target_module, source, TypeNS, false, true);
let module_ = self.resolver.current_module;
+ let mut privacy_error = true;
for &(ns, result, determined) in &[(ValueNS, &value_result, value_determined),
(TypeNS, &type_result, type_determined)] {
- if determined.get() { continue }
- if let Indeterminate = *result { continue }
-
- determined.set(true);
- if let Success(binding) = *result {
- if !binding.is_importable() {
+ match *result {
+ Failed(..) if !determined.get() => {
+ determined.set(true);
+ module_.update_resolution(target, ns, |resolution| {
+ resolution.single_imports.directive_failed()
+ });
+ }
+ Success(binding) if !binding.is_importable() => {
let msg = format!("`{}` is not directly importable", target);
span_err!(self.resolver.session, directive.span, E0253, "{}", &msg);
// Do not import this illegal binding. Import a dummy binding and pretend
self.import_dummy_binding(module_, directive);
return Success(());
}
-
- let privacy_error = if !self.resolver.is_accessible(binding.vis) {
- Some(Box::new(PrivacyError(directive.span, source, binding)))
- } else {
- None
- };
-
- let imported_binding = directive.import(binding, privacy_error);
- let conflict = module_.try_define_child(target, ns, imported_binding);
- if let Err(old_binding) = conflict {
- let binding = &directive.import(binding, None);
- self.resolver.report_conflict(module_, target, ns, binding, old_binding);
+ Success(binding) if !self.resolver.is_accessible(binding.vis) => {}
+ Success(binding) if !determined.get() => {
+ determined.set(true);
+ let imported_binding = directive.import(binding);
+ let conflict = module_.try_define_child(target, ns, imported_binding);
+ if let Err(old_binding) = conflict {
+ let binding = &directive.import(binding);
+ self.resolver.report_conflict(module_, target, ns, binding, old_binding);
+ }
+ privacy_error = false;
}
- } else {
- module_.update_resolution(target, ns, |resolution| {
- resolution.single_imports.directive_failed();
- });
+ Success(_) => privacy_error = false,
+ _ => {}
}
}
_ => (),
}
+ if privacy_error {
+ for &(ns, result) in &[(ValueNS, &value_result), (TypeNS, &type_result)] {
+ let binding = match *result { Success(binding) => binding, _ => continue };
+ self.resolver.privacy_errors.push(PrivacyError(directive.span, source, binding));
+ let _ = module_.try_define_child(target, ns, directive.import(binding));
+ }
+ }
+
match (&value_result, &type_result) {
(&Success(binding), _) if !binding.pseudo_vis()
.is_at_least(directive.vis, self.resolver) &&
_ => {}
}
- // Report a privacy error here if all successful namespaces are privacy errors.
- let mut privacy_error = None;
- for &ns in &[ValueNS, TypeNS] {
- privacy_error = match module_.resolve_name(target, ns, true) {
- Success(&NameBinding {
- kind: NameBindingKind::Import { ref privacy_error, .. }, ..
- }) => privacy_error.as_ref().map(|error| (**error).clone()),
- _ => continue,
- };
- if privacy_error.is_none() { break }
- }
- privacy_error.map(|error| self.resolver.privacy_errors.push(error));
-
// Record what this import resolves to for later uses in documentation,
// this may resolve to either a value or a type, but for documentation
// purposes it's good enough to just favor one over the other.
}).collect::<Vec<_>>();
for ((name, ns), binding) in bindings {
if binding.is_importable() && binding.is_pseudo_public() {
- let _ = module_.try_define_child(name, ns, directive.import(binding, None));
+ let _ = module_.try_define_child(name, ns, directive.import(binding));
}
}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This pass is only used for UNIT TESTS related to incremental
+//! compilation. It tests whether a particular `.o` file will be re-used
+//! from a previous compilation or whether it must be regenerated.
+//!
+//! The user adds annotations to the crate of the following form:
+//!
+//! ```
+//! #![rustc_partition_reused(module="spike", cfg="rpass2")]
+//! #![rustc_partition_translated(module="spike-x", cfg="rpass2")]
+//! ```
+//!
+//! The first indicates (in the cfg `rpass2`) that `spike.o` will be
+//! reused, the second that `spike-x.o` will be recreated. If these
+//! annotations are inaccurate, errors are reported.
+//!
+//! The reason that we use `cfg=...` and not `#[cfg_attr]` is so that
+//! the HIR doesn't change as a result of the annotations, which might
+//! perturb the reuse results.
+
+use rustc::ty::TyCtxt;
+use syntax::ast;
+use syntax::attr::AttrMetaMethods;
+use syntax::parse::token::InternedString;
+
+use {ModuleSource, ModuleTranslation};
+
+const PARTITION_REUSED: &'static str = "rustc_partition_reused";
+const PARTITION_TRANSLATED: &'static str = "rustc_partition_translated";
+
+const MODULE: &'static str = "module";
+const CFG: &'static str = "cfg";
+
+#[derive(Debug, PartialEq)]
+enum Disposition { Reused, Translated }
+
+pub fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ modules: &[ModuleTranslation]) {
+ let _ignore = tcx.dep_graph.in_ignore();
+
+ if tcx.sess.opts.incremental.is_none() {
+ return;
+ }
+
+ let ams = AssertModuleSource { tcx: tcx, modules: modules };
+ for attr in &tcx.map.krate().attrs {
+ ams.check_attr(attr);
+ }
+}
+
+struct AssertModuleSource<'a, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ modules: &'a [ModuleTranslation],
+}
+
+impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> {
+ fn check_attr(&self, attr: &ast::Attribute) {
+ let disposition = if attr.check_name(PARTITION_REUSED) {
+ Disposition::Reused
+ } else if attr.check_name(PARTITION_TRANSLATED) {
+ Disposition::Translated
+ } else {
+ return;
+ };
+
+ if !self.check_config(attr) {
+ debug!("check_attr: config does not match, ignoring attr");
+ return;
+ }
+
+ let mname = self.field(attr, MODULE);
+ let mtrans = self.modules.iter().find(|mtrans| &mtrans.name[..] == &mname[..]);
+ let mtrans = match mtrans {
+ Some(m) => m,
+ None => {
+ debug!("module name `{}` not found amongst:", mname);
+ for mtrans in self.modules {
+ debug!("module named `{}` with disposition {:?}",
+ mtrans.name,
+ self.disposition(mtrans));
+ }
+
+ self.tcx.sess.span_err(
+ attr.span,
+ &format!("no module named `{}`", mname));
+ return;
+ }
+ };
+
+ let mtrans_disposition = self.disposition(mtrans);
+ if disposition != mtrans_disposition {
+ self.tcx.sess.span_err(
+ attr.span,
+ &format!("expected module named `{}` to be {:?} but is {:?}",
+ mname,
+ disposition,
+ mtrans_disposition));
+ }
+ }
+
+ fn disposition(&self, mtrans: &ModuleTranslation) -> Disposition {
+ match mtrans.source {
+ ModuleSource::Preexisting(_) => Disposition::Reused,
+ ModuleSource::Translated(_) => Disposition::Translated,
+ }
+ }
+
+ fn field(&self, attr: &ast::Attribute, name: &str) -> InternedString {
+ for item in attr.meta_item_list().unwrap_or(&[]) {
+ if item.check_name(name) {
+ if let Some(value) = item.value_str() {
+ return value;
+ } else {
+ self.tcx.sess.span_fatal(
+ item.span,
+ &format!("associated value expected for `{}`", name));
+ }
+ }
+ }
+
+ self.tcx.sess.span_fatal(
+ attr.span,
+ &format!("no field `{}`", name));
+ }
+
+ /// Scan for a `cfg="foo"` attribute and check whether we have a
+ /// cfg flag called `foo`.
+ fn check_config(&self, attr: &ast::Attribute) -> bool {
+ let config = &self.tcx.map.krate().config;
+ let value = self.field(attr, CFG);
+ debug!("check_config(config={:?}, value={:?})", config, value);
+ if config.iter().any(|c| c.check_name(&value[..])) {
+ debug!("check_config: matched");
+ return true;
+ }
+ debug!("check_config: no match found");
+ return false;
+ }
+
+}
use back::lto;
use back::link::{get_linker, remove};
+use rustc_incremental::save_trans_partition;
use session::config::{OutputFilenames, Passes, SomePasses, AllPasses};
use session::Session;
use session::config::{self, OutputType};
use llvm;
use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef, ContextRef};
use llvm::SMDiagnosticRef;
-use {CrateTranslation, ModuleTranslation};
+use {CrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation};
use util::common::time;
use util::common::path2cstr;
+use util::fs::link_or_copy;
use errors::{self, Handler, Level, DiagnosticBuilder};
use errors::emitter::Emitter;
use syntax_pos::MultiSpan;
+use context::{is_pie_binary, get_reloc_model};
use std::collections::HashMap;
use std::ffi::{CStr, CString};
}
pub fn create_target_machine(sess: &Session) -> TargetMachineRef {
- let reloc_model_arg = match sess.opts.cg.relocation_model {
- Some(ref s) => &s[..],
- None => &sess.target.target.options.relocation_model[..],
- };
- let reloc_model = match reloc_model_arg {
- "pic" => llvm::RelocPIC,
- "static" => llvm::RelocStatic,
- "default" => llvm::RelocDefault,
- "dynamic-no-pic" => llvm::RelocDynamicNoPic,
- _ => {
- sess.err(&format!("{:?} is not a valid relocation mode",
- sess.opts
- .cg
- .relocation_model));
- sess.abort_if_errors();
- bug!();
- }
- };
+ let reloc_model = get_reloc_model(sess);
let opt_level = get_llvm_opt_level(sess.opts.optimize);
let use_softfp = sess.opts.cg.soft_float;
- let any_library = sess.crate_types.borrow().iter().any(|ty| {
- *ty != config::CrateTypeExecutable
- });
-
let ffunction_sections = sess.target.target.options.function_sections;
let fdata_sections = ffunction_sections;
reloc_model,
opt_level,
use_softfp,
- !any_library && reloc_model == llvm::RelocPIC,
+ is_pie_binary(sess),
ffunction_sections,
fdata_sections,
)
remark: Passes,
// Worker thread number
worker: usize,
+ // Directory where incremental data is stored (if any)
+ incremental: Option<PathBuf>,
}
impl<'a> CodegenContext<'a> {
plugin_passes: sess.plugin_llvm_passes.borrow().clone(),
remark: sess.opts.cg.remark.clone(),
worker: 0,
+ incremental: sess.opts.incremental.clone(),
}
}
}
// Unsafe due to LLVM calls.
unsafe fn optimize_and_codegen(cgcx: &CodegenContext,
mtrans: ModuleTranslation,
+ mllvm: ModuleLlvm,
config: ModuleConfig,
output_names: OutputFilenames) {
- let llmod = mtrans.llmod;
- let llcx = mtrans.llcx;
+ let llmod = mllvm.llmod;
+ let llcx = mllvm.llcx;
let tm = config.tm;
// llcx doesn't outlive this function, so we can put this on the stack.
if copy_bc_to_obj {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
- if let Err(e) = fs::copy(&bc_out, &obj_out) {
+ if let Err(e) = link_or_copy(&bc_out, &obj_out) {
cgcx.handler.err(&format!("failed to copy bitcode to object file: {}", e));
}
}
pub fn cleanup_llvm(trans: &CrateTranslation) {
for module in trans.modules.iter() {
unsafe {
- llvm::LLVMDisposeModule(module.llmod);
- llvm::LLVMContextDispose(module.llcx);
+ match module.source {
+ ModuleSource::Translated(llvm) => {
+ llvm::LLVMDisposeModule(llvm.llmod);
+ llvm::LLVMContextDispose(llvm.llcx);
+ }
+ ModuleSource::Preexisting(_) => {
+ }
+ }
}
}
}
run_work_multithreaded(sess, work_items, num_workers);
}
+ // If in incr. comp. mode, preserve the `.o` files for potential re-use
+ for mtrans in trans.modules.iter() {
+ let mut files = vec![];
+
+ if modules_config.emit_obj {
+ let path = crate_output.temp_path(OutputType::Object, Some(&mtrans.name));
+ files.push((OutputType::Object, path));
+ }
+
+ if modules_config.emit_bc {
+ let path = crate_output.temp_path(OutputType::Bitcode, Some(&mtrans.name));
+ files.push((OutputType::Bitcode, path));
+ }
+
+ save_trans_partition(sess, &mtrans.name, mtrans.symbol_name_hash, &files);
+ }
+
// All codegen is finished.
unsafe {
llvm::LLVMRustDisposeTargetMachine(tm);
fn execute_work_item(cgcx: &CodegenContext,
work_item: WorkItem) {
unsafe {
- optimize_and_codegen(cgcx,
- work_item.mtrans,
- work_item.config,
- work_item.output_names);
+ match work_item.mtrans.source {
+ ModuleSource::Translated(mllvm) => {
+ debug!("llvm-optimizing {:?}", work_item.mtrans.name);
+ optimize_and_codegen(cgcx,
+ work_item.mtrans,
+ mllvm,
+ work_item.config,
+ work_item.output_names);
+ }
+ ModuleSource::Preexisting(wp) => {
+ let incremental = cgcx.incremental.as_ref().unwrap();
+ let name = &work_item.mtrans.name;
+ for (kind, saved_file) in wp.saved_files {
+ let obj_out = work_item.output_names.temp_path(kind, Some(name));
+ let source_file = incremental.join(&saved_file);
+ debug!("copying pre-existing module `{}` from {:?} to {}",
+ work_item.mtrans.name,
+ source_file,
+ obj_out.display());
+ match link_or_copy(&source_file, &obj_out) {
+ Ok(()) => { }
+ Err(err) => {
+ cgcx.handler.err(&format!("unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err));
+ }
+ }
+ }
+ }
+ }
}
}
let mut tx = Some(tx);
futures.push(rx);
+ let incremental = sess.opts.incremental.clone();
+
thread::Builder::new().name(format!("codegen-{}", i)).spawn(move || {
let diag_handler = Handler::with_emitter(true, false, box diag_emitter);
plugin_passes: plugin_passes,
remark: remark,
worker: i,
+ incremental: incremental,
};
loop {
#![allow(non_camel_case_types)]
use super::CrateTranslation;
+use super::ModuleLlvm;
+use super::ModuleSource;
use super::ModuleTranslation;
+use assert_module_sources;
use back::link;
use back::linker::LinkerInfo;
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::{DepNode, WorkProduct};
use rustc::hir::map as hir_map;
use rustc::util::common::time;
use rustc::mir::mir_map::MirMap;
let instance = Instance::mono(ccx.shared(), main_def_id);
- if !ccx.codegen_unit().items.contains_key(&TransItem::Fn(instance)) {
+ if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) {
// We want to create the wrapper in the same codegen unit as Rust's main
// function.
return;
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
-fn internalize_symbols<'a, 'tcx>(ccxs: &CrateContextList<'a, 'tcx>,
+fn internalize_symbols<'a, 'tcx>(sess: &Session,
+ ccxs: &CrateContextList<'a, 'tcx>,
symbol_map: &SymbolMap<'tcx>,
reachable: &FnvHashSet<&str>) {
let scx = ccxs.shared();
let tcx = scx.tcx();
+ // In incr. comp. mode, we can't necessarily see all refs since we
+ // don't generate LLVM IR for reused modules, so skip this
+ // step. Later we should get smarter.
+ if sess.opts.debugging_opts.incremental.is_some() {
+ return;
+ }
+
// 'unsafe' because we are holding on to CStr's from the LLVM module within
// this block.
unsafe {
// Collect all symbols that need to stay externally visible because they
// are referenced via a declaration in some other codegen unit.
- for ccx in ccxs.iter() {
+ for ccx in ccxs.iter_need_trans() {
for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
let linkage = llvm::LLVMGetLinkage(val);
// We only care about external declarations (not definitions)
// Examine each external definition. If the definition is not used in
// any other compilation unit, and is not reachable from other crates,
// then give it internal linkage.
- for ccx in ccxs.iter() {
+ for ccx in ccxs.iter_need_trans() {
for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
let linkage = llvm::LLVMGetLinkage(val);
"\x01__imp_"
};
unsafe {
- for ccx in cx.iter() {
+ for ccx in cx.iter_need_trans() {
let exported: Vec<_> = iter_globals(ccx.llmod())
.filter(|&val| {
llvm::LLVMGetLinkage(val) ==
let metadata_module = ModuleTranslation {
name: "metadata".to_string(),
- llcx: shared_ccx.metadata_llcx(),
- llmod: shared_ccx.metadata_llmod(),
+ symbol_name_hash: 0, // we always rebuild metadata, at least for now
+ source: ModuleSource::Translated(ModuleLlvm {
+ llcx: shared_ccx.metadata_llcx(),
+ llmod: shared_ccx.metadata_llmod(),
+ }),
};
let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
let symbol_map = Rc::new(symbol_map);
+ let previous_work_products = trans_reuse_previous_work_products(tcx,
+ &codegen_units,
+ &symbol_map);
+
let crate_context_list = CrateContextList::new(&shared_ccx,
codegen_units,
+ previous_work_products,
symbol_map.clone());
- let modules = crate_context_list.iter()
- .map(|ccx| ModuleTranslation {
- name: String::from(&ccx.codegen_unit().name[..]),
- llcx: ccx.llcx(),
- llmod: ccx.llmod()
+ let modules: Vec<_> = crate_context_list.iter_all()
+ .map(|ccx| {
+ let source = match ccx.previous_work_product() {
+ Some(buf) => ModuleSource::Preexisting(buf.clone()),
+ None => ModuleSource::Translated(ModuleLlvm {
+ llcx: ccx.llcx(),
+ llmod: ccx.llmod(),
+ }),
+ };
+
+ ModuleTranslation {
+ name: String::from(ccx.codegen_unit().name()),
+ symbol_name_hash: ccx.codegen_unit().compute_symbol_name_hash(tcx, &symbol_map),
+ source: source,
+ }
})
.collect();
+ assert_module_sources::assert_module_sources(tcx, &modules);
+
// Skip crate items and just output metadata in -Z no-trans mode.
if tcx.sess.opts.no_trans {
let linker_info = LinkerInfo::new(&shared_ccx, &[]);
}
// Instantiate translation items without filling out definitions yet...
- for ccx in crate_context_list.iter() {
- let trans_items = ccx.codegen_unit()
- .items_in_deterministic_order(tcx, &symbol_map);
+ for ccx in crate_context_list.iter_need_trans() {
+ let cgu = ccx.codegen_unit();
+ let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);
- for (trans_item, linkage) in trans_items {
- trans_item.predefine(&ccx, linkage);
- }
+ tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
+ for (trans_item, linkage) in trans_items {
+ trans_item.predefine(&ccx, linkage);
+ }
+ });
}
// ... and now that we have everything pre-defined, fill out those definitions.
- for ccx in crate_context_list.iter() {
- let trans_items = ccx.codegen_unit()
- .items_in_deterministic_order(tcx, &symbol_map);
-
- for (trans_item, _) in trans_items {
- trans_item.define(&ccx);
- }
+ for ccx in crate_context_list.iter_need_trans() {
+ let cgu = ccx.codegen_unit();
+ let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);
+ tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
+ for (trans_item, _) in trans_items {
+ trans_item.define(&ccx);
+ }
- // If this codegen unit contains the main function, also create the
- // wrapper here
- maybe_create_entry_wrapper(&ccx);
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ maybe_create_entry_wrapper(&ccx);
- // Run replace-all-uses-with for statics that need it
- for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
- unsafe {
- let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
- llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
- llvm::LLVMDeleteGlobal(old_g);
+ // Run replace-all-uses-with for statics that need it
+ for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
+ unsafe {
+ let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
+ llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+ llvm::LLVMDeleteGlobal(old_g);
+ }
}
- }
- // Finalize debuginfo
- if ccx.sess().opts.debuginfo != NoDebugInfo {
- debuginfo::finalize(&ccx);
- }
+ // Finalize debuginfo
+ if ccx.sess().opts.debuginfo != NoDebugInfo {
+ debuginfo::finalize(&ccx);
+ }
+ });
}
symbol_names_test::report_symbol_names(&shared_ccx);
}
time(shared_ccx.sess().time_passes(), "internalize symbols", || {
- internalize_symbols(&crate_context_list,
+ internalize_symbols(sess,
+ &crate_context_list,
&symbol_map,
&reachable_symbols.iter()
.map(|s| &s[..])
}
}
+/// For each CGU, identify if we can reuse an existing object file (or
+/// maybe other context).
+///
+/// Returns one entry per codegen unit, in the same order as
+/// `codegen_units`: `Some(work_product)` when the result saved by a
+/// previous compilation session can be reused (its recorded symbol-name
+/// hash matches the current one), `None` when the CGU must be
+/// re-translated.
+fn trans_reuse_previous_work_products(tcx: TyCtxt,
+ codegen_units: &[CodegenUnit],
+ symbol_map: &SymbolMap)
+ -> Vec<Option<WorkProduct>> {
+ debug!("trans_reuse_previous_work_products()");
+ codegen_units
+ .iter()
+ .map(|cgu| {
+ let id = cgu.work_product_id();
+
+ // Hash of the symbol names this CGU would produce now; if it
+ // differs from the hash stored with the saved work product,
+ // the cached object file is stale.
+ let hash = cgu.compute_symbol_name_hash(tcx, symbol_map);
+
+ debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash);
+
+ if let Some(work_product) = tcx.dep_graph.previous_work_product(&id) {
+ if work_product.input_hash == hash {
+ debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
+ return Some(work_product);
+ } else {
+ debug!("trans_reuse_previous_work_products: \
+ not reusing {:?} because hash changed to {:?}",
+ work_product, hash);
+ }
+ }
+
+ None
+ })
+ .collect()
+}
+
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
-> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
let time_passes = scx.sess().time_passes();
let mut item_to_cgus = HashMap::new();
for cgu in &codegen_units {
- for (&trans_item, &linkage) in &cgu.items {
+ for (&trans_item, &linkage) in cgu.items() {
item_to_cgus.entry(trans_item)
.or_insert(Vec::new())
- .push((cgu.name.clone(), linkage));
+ .push((cgu.name().clone(), linkage));
}
}
failure_order: AtomicOrdering,
weak: llvm::Bool) -> ValueRef {
unsafe {
- llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
+ llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
order, failure_order, weak)
}
}
llfn
}
+/// Returns true if translating this closure's body through the MIR
+/// path (`trans_closure_body_via_mir`) would NOT actually use MIR,
+/// i.e. the attempt would fall back and fail for upstream closures.
+///
+/// MIR trans is the default iff `-Z orbit` is set; a per-item
+/// attribute (`rustc_no_mir` when MIR is the default, `rustc_mir`
+/// otherwise) inverts that choice, hence the XOR below.
+fn translating_closure_body_via_mir_will_fail(ccx: &CrateContext,
+ closure_def_id: DefId)
+ -> bool {
+ let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
+ // The attribute that flips the compiler-wide default for this item.
+ let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
+ let use_mir = default_to_mir ^ ccx.tcx().has_attr(closure_def_id, invert);
+
+ !use_mir
+}
+
+/// Translates the body of the closure identified by `closure_def_id`
+/// (with the given substitutions) for its side effect of instantiating
+/// the closure in this codegen unit; the result value is discarded
+/// (`Dest::Ignore`).
+///
+/// NOTE(review): the decl/block/node-id arguments passed here are all
+/// dummies — presumably `trans_closure_expr` ignores them when the
+/// body comes from MIR and only needs the def-id and substs; confirm
+/// against `trans_closure_expr` before relying on this.
+pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ closure_def_id: DefId,
+ closure_substs: ty::ClosureSubsts<'tcx>) {
+ use syntax::ast::DUMMY_NODE_ID;
+ use syntax_pos::DUMMY_SP;
+ use syntax::ptr::P;
+
+ // Placeholder "fn() -> !" decl and empty block standing in for the
+ // closure's HIR, which is not available/needed here.
+ trans_closure_expr(Dest::Ignore(ccx),
+ &hir::FnDecl {
+ inputs: P::new(),
+ output: hir::NoReturn(DUMMY_SP),
+ variadic: false
+ },
+ &hir::Block {
+ stmts: P::new(),
+ expr: None,
+ id: DUMMY_NODE_ID,
+ rules: hir::DefaultBlock,
+ span: DUMMY_SP
+ },
+ DUMMY_NODE_ID,
+ closure_def_id,
+ closure_substs);
+}
+
pub enum Dest<'a, 'tcx: 'a> {
SaveIn(Block<'a, 'tcx>, ValueRef),
Ignore(&'a CrateContext<'a, 'tcx>)
// If we have not done so yet, translate this closure's body
if !ccx.instances().borrow().contains_key(&instance) {
let llfn = get_or_create_closure_declaration(ccx, closure_def_id, closure_substs);
- llvm::SetLinkage(llfn, llvm::WeakODRLinkage);
- llvm::SetUniqueComdat(ccx.llmod(), llfn);
+
+ if ccx.sess().target.target.options.allows_weak_linkage {
+ llvm::SetLinkage(llfn, llvm::WeakODRLinkage);
+ llvm::SetUniqueComdat(ccx.llmod(), llfn);
+ } else {
+ llvm::SetLinkage(llfn, llvm::InternalLinkage);
+ }
// set an inline hint for all closures
attributes::inline(llfn, attributes::InlineAttr::Hint);
// If this is a closure, redirect to it.
let llfn = get_or_create_closure_declaration(ccx, closure_def_id, substs);
+ // If weak linkage is not allowed, we have to make sure that a local,
+ // private copy of the closure is available in this codegen unit
+ if !ccx.sess().target.target.options.allows_weak_linkage &&
+ !ccx.sess().opts.single_codegen_unit() {
+
+ if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) {
+ // If the closure is defined in the local crate, we can always just
+ // translate it.
+ let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node {
+ hir::ExprClosure(_, ref decl, ref body, _) => (decl, body),
+ _ => { unreachable!() }
+ };
+
+ trans_closure_expr(Dest::Ignore(ccx),
+ decl,
+ body,
+ node_id,
+ closure_def_id,
+ substs);
+ } else {
+ // If the closure is defined in an upstream crate, we can only
+ // translate it if MIR-trans is active.
+
+ if translating_closure_body_via_mir_will_fail(ccx, closure_def_id) {
+ ccx.sess().fatal("You have run into a known limitation of the \
+ MingW toolchain. Either compile with -Zorbit or \
+ with -Ccodegen-units=1 to work around it.");
+ }
+
+ trans_closure_body_via_mir(ccx, closure_def_id, substs);
+ }
+ }
+
// If the closure is a Fn closure, but a FnOnce is needed (etc),
// then adapt the self type
let llfn_closure_kind = ccx.tcx().closure_kind(closure_def_id);
inlined_vid: ast::NodeId)
-> ty::VariantDef<'tcx>
{
-
let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
inlined_vid);
}), ..}) => ty,
_ => ctor_ty
}.ty_adt_def().unwrap();
- let inlined_vid_def_id = ccx.tcx().map.local_def_id(inlined_vid);
- adt_def.variants.iter().find(|v| {
- inlined_vid_def_id == v.did ||
- ccx.external().borrow().get(&v.did) == Some(&Some(inlined_vid))
- }).unwrap_or_else(|| {
- bug!("no variant for {:?}::{}", adt_def, inlined_vid)
- })
+ let variant_def_id = if ccx.tcx().map.is_inlined(inlined_vid) {
+ ccx.defid_for_inlined_node(inlined_vid).unwrap()
+ } else {
+ ccx.tcx().map.local_def_id(inlined_vid)
+ };
+
+ adt_def.variants
+ .iter()
+ .find(|v| variant_def_id == v.did)
+ .unwrap_or_else(|| {
+ bug!("no variant for {:?}::{}", adt_def, inlined_vid)
+ })
}
// To avoid UB from LLVM, these two functions mask RHS with an
.get(TransItem::Static(id))
.expect("Local statics should always be in the SymbolMap");
// Make sure that this is never executed for something inlined.
- assert!(!ccx.external_srcs().borrow().contains_key(&id));
+ assert!(!ccx.tcx().map.is_inlined(id));
let defined_in_current_codegen_unit = ccx.codegen_unit()
- .items
+ .items()
.contains_key(&TransItem::Static(id));
if defined_in_current_codegen_unit {
if declare::get_declared_value(ccx, sym).is_none() {
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
-use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig};
+use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct};
use middle::cstore::LinkMeta;
use rustc::hir::def::ExportMap;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt};
use session::config::NoDebugInfo;
use session::Session;
+use session::config;
use symbol_map::SymbolMap;
use util::sha2::Sha256;
-use util::nodemap::{NodeMap, NodeSet, DefIdMap, FnvHashMap, FnvHashSet};
+use util::nodemap::{NodeSet, DefIdMap, FnvHashMap, FnvHashSet};
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
pub struct LocalCrateContext<'tcx> {
llmod: ModuleRef,
llcx: ContextRef,
+ previous_work_product: Option<WorkProduct>,
tn: TypeNames, // FIXME: This seems to be largely unused.
codegen_unit: CodegenUnit<'tcx>,
needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>>,
- /// Track mapping of external ids to local items imported for inlining
- external: RefCell<DefIdMap<Option<ast::NodeId>>>,
- /// Backwards version of the `external` map (inlined items to where they
- /// came from)
- external_srcs: RefCell<NodeMap<DefId>>,
/// Cache instances of monomorphic and polymorphic items
instances: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
monomorphizing: RefCell<DefIdMap<usize>>,
}
impl<'a, 'tcx: 'a> CrateContextList<'a, 'tcx> {
-
pub fn new(shared_ccx: &'a SharedCrateContext<'a, 'tcx>,
codegen_units: Vec<CodegenUnit<'tcx>>,
+ previous_work_products: Vec<Option<WorkProduct>>,
symbol_map: Rc<SymbolMap<'tcx>>)
-> CrateContextList<'a, 'tcx> {
CrateContextList {
shared: shared_ccx,
- local_ccxs: codegen_units.into_iter().map(|codegen_unit| {
- LocalCrateContext::new(shared_ccx, codegen_unit, symbol_map.clone())
+ local_ccxs: codegen_units.into_iter().zip(previous_work_products).map(|(cgu, wp)| {
+ LocalCrateContext::new(shared_ccx, cgu, wp, symbol_map.clone())
}).collect()
}
}
- pub fn iter<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
+ /// Iterate over all crate contexts, whether or not they need
+ /// translation. That is, whether or not a `.o` file is available
+ /// for re-use from a previous incremental compilation.
+ pub fn iter_all<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
CrateContextIterator {
shared: self.shared,
index: 0,
- local_ccxs: &self.local_ccxs[..]
+ local_ccxs: &self.local_ccxs[..],
+ filter_to_previous_work_product_unavail: false,
+ }
+ }
+
+ /// Iterator over all CCX that need translation (cannot reuse results from
+ /// previous incr. comp.).
+ pub fn iter_need_trans<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
+ CrateContextIterator {
+ shared: self.shared,
+ index: 0,
+ local_ccxs: &self.local_ccxs[..],
+ filter_to_previous_work_product_unavail: true,
}
}
shared: &'a SharedCrateContext<'a, 'tcx>,
local_ccxs: &'a [LocalCrateContext<'tcx>],
index: usize,
+
+ /// if true, only return results where `previous_work_product` is none
+ filter_to_previous_work_product_unavail: bool,
}
impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> {
type Item = CrateContext<'a, 'tcx>;
fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
- if self.index >= self.local_ccxs.len() {
- return None;
- }
+ loop {
+ if self.index >= self.local_ccxs.len() {
+ return None;
+ }
- let index = self.index;
- self.index += 1;
+ let index = self.index;
+ self.index += 1;
- Some(CrateContext {
- shared: self.shared,
- index: index,
- local_ccxs: self.local_ccxs,
- })
+ let ccx = CrateContext {
+ shared: self.shared,
+ index: index,
+ local_ccxs: self.local_ccxs,
+ };
+
+ if
+ self.filter_to_previous_work_product_unavail &&
+ ccx.previous_work_product().is_some()
+ {
+ continue;
+ }
+
+ return Some(ccx);
+ }
}
}
}
}
+/// Resolves the relocation model to use: the `-C relocation-model`
+/// command-line value if given, otherwise the target's default, mapped
+/// to the corresponding `llvm::RelocMode` variant.
+///
+/// Emits an error and aborts compilation on an unrecognized name.
+pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
+ let reloc_model_arg = match sess.opts.cg.relocation_model {
+ Some(ref s) => &s[..],
+ None => &sess.target.target.options.relocation_model[..],
+ };
+
+ match reloc_model_arg {
+ "pic" => llvm::RelocPIC,
+ "static" => llvm::RelocStatic,
+ "default" => llvm::RelocDefault,
+ "dynamic-no-pic" => llvm::RelocDynamicNoPic,
+ _ => {
+ sess.err(&format!("{:?} is not a valid relocation mode",
+ sess.opts
+ .cg
+ .relocation_model));
+ // err() only records the error; abort here so the bug!()
+ // below is never reached in the error case.
+ sess.abort_if_errors();
+ bug!();
+ }
+ }
+}
+
+/// True if any of the crate types being produced in this session is a
+/// library, i.e. anything other than an executable.
+fn is_any_library(sess: &Session) -> bool {
+ sess.crate_types.borrow().iter().any(|ty| {
+ *ty != config::CrateTypeExecutable
+ })
+}
+
+/// True if we are building a position-independent executable: every
+/// produced crate type is an executable and the effective relocation
+/// model is "pic".
+pub fn is_pie_binary(sess: &Session) -> bool {
+ !is_any_library(sess) && get_reloc_model(sess) == llvm::RelocPIC
+}
+
unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
let llcx = llvm::LLVMContextCreate();
let mod_name = CString::new(mod_name).unwrap();
let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes())
.ok().expect("got a non-UTF8 data-layout from LLVM");
- if sess.target.target.data_layout != data_layout {
+ // Unfortunately LLVM target specs change over time, and right now we
+ // don't have proper support to work with any `data_layout` other
+ // than the one that is in the rust-lang/rust repo. If this
+ // compiler is configured against a custom LLVM, we may have a
+ // differing data layout, even though we should update our own to use
+ // that one.
+ //
+ // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
+ // disable this check entirely as we may be configured with something
+ // that has a different target layout.
+ //
+ // Unsure if this will actually cause breakage when rustc is configured
+ // as such.
+ //
+ // FIXME(#34960)
+ let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
+ let custom_llvm_used = cfg_llvm_root.trim() != "";
+
+ if !custom_llvm_used && sess.target.target.data_layout != data_layout {
bug!("data-layout for builtin `{}` target, `{}`, \
differs from LLVM default, `{}`",
sess.target.target.llvm_target,
let llvm_target = sess.target.target.llvm_target.as_bytes();
let llvm_target = CString::new(llvm_target).unwrap();
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+
+ if is_pie_binary(sess) {
+ llvm::LLVMRustSetModulePIELevel(llmod);
+ }
+
(llcx, llmod)
}
impl<'tcx> LocalCrateContext<'tcx> {
fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
codegen_unit: CodegenUnit<'tcx>,
+ previous_work_product: Option<WorkProduct>,
symbol_map: Rc<SymbolMap<'tcx>>)
-> LocalCrateContext<'tcx> {
unsafe {
// crashes if the module identifier is same as other symbols
// such as a function name in the module.
// 1. http://llvm.org/bugs/show_bug.cgi?id=11479
- let llmod_id = format!("{}.rs", codegen_unit.name);
+ let llmod_id = format!("{}.rs", codegen_unit.name());
let (llcx, llmod) = create_context_and_module(&shared.tcx.sess,
&llmod_id[..]);
let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
- Some(debuginfo::CrateDebugContext::new(llmod))
+ let dctx = debuginfo::CrateDebugContext::new(llmod);
+ debuginfo::metadata::compile_unit_metadata(shared, &dctx, shared.tcx.sess);
+ Some(dctx)
} else {
None
};
let local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
+ previous_work_product: previous_work_product,
codegen_unit: codegen_unit,
tn: TypeNames::new(),
needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()),
fn_pointer_shims: RefCell::new(FnvHashMap()),
drop_glues: RefCell::new(FnvHashMap()),
- external: RefCell::new(DefIdMap()),
- external_srcs: RefCell::new(NodeMap()),
instances: RefCell::new(FnvHashMap()),
monomorphizing: RefCell::new(DefIdMap()),
vtables: RefCell::new(FnvHashMap()),
self.local().llcx
}
+ pub fn previous_work_product(&self) -> Option<&WorkProduct> {
+ self.local().previous_work_product.as_ref()
+ }
+
pub fn codegen_unit(&self) -> &CodegenUnit<'tcx> {
&self.local().codegen_unit
}
&self.local().drop_glues
}
- pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
- &self.local().external
+ pub fn local_node_for_inlined_defid<'a>(&'a self, def_id: DefId) -> Option<ast::NodeId> {
+ self.sess().cstore.local_node_for_inlined_defid(def_id)
}
- pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<DefId>> {
- &self.local().external_srcs
+ pub fn defid_for_inlined_node<'a>(&'a self, node_id: ast::NodeId) -> Option<DefId> {
+ self.sess().cstore.defid_for_inlined_node(node_id)
}
pub fn instances<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
fn_should_be_ignored, is_node_local_to_unit};
use super::namespace::mangled_name_of_item;
use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
-use super::{declare_local, VariableKind, VariableAccess};
+use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext};
+use context::SharedCrateContext;
+use session::Session;
use llvm::{self, ValueRef};
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
use syntax::parse::token;
use syntax_pos::{self, Span};
-
// From DWARF 5.
// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1
const DW_LANG_RUST: c_uint = 0x1c;
pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
// ptr::null() doesn't work :(
-pub const NO_FILE_METADATA: DIFile = (0 as DIFile);
pub const NO_SCOPE_METADATA: DIScope = (0 as DIScope);
const FLAGS_NONE: c_uint = 0;
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let def_id = if let Some(node_id) = cx.tcx().map.as_local_node_id(def_id) {
- match cx.external_srcs().borrow().get(&node_id).cloned() {
- Some(source_def_id) => {
- // The given def_id identifies the inlined copy of a
- // type definition, let's take the source of the copy.
- source_def_id
- }
- None => def_id
+ if cx.tcx().map.is_inlined(node_id) {
+ // The given def_id identifies the inlined copy of a
+ // type definition, let's take the source of the copy.
+ cx.defid_for_inlined_node(node_id).unwrap()
+ } else {
+ def_id
}
} else {
def_id
unsafe {
llvm::LLVMDIBuilderCreateSubroutineType(
DIB(cx),
- NO_FILE_METADATA,
+ unknown_file_metadata(cx),
create_DIArray(DIB(cx), &signature_metadata[..]))
},
false);
let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
let trait_llvm_type = type_of::type_of(cx, trait_object_type);
+ let file_metadata = unknown_file_metadata(cx);
composite_type_metadata(cx,
trait_llvm_type,
unique_type_id,
&[],
containing_scope,
- NO_FILE_METADATA,
+ file_metadata,
syntax_pos::DUMMY_SP)
}
return ptr_metadata;
}
-pub fn compile_unit_metadata(cx: &CrateContext) -> DIDescriptor {
- let work_dir = &cx.sess().working_dir;
- let compile_unit_name = match cx.sess().local_crate_source_file {
- None => fallback_path(cx),
+pub fn compile_unit_metadata(scc: &SharedCrateContext,
+ debug_context: &CrateDebugContext,
+ sess: &Session)
+ -> DIDescriptor {
+ let work_dir = &sess.working_dir;
+ let compile_unit_name = match sess.local_crate_source_file {
+ None => fallback_path(scc),
Some(ref abs_path) => {
if abs_path.is_relative() {
- cx.sess().warn("debuginfo: Invalid path to crate's local root source file!");
- fallback_path(cx)
+ sess.warn("debuginfo: Invalid path to crate's local root source file!");
+ fallback_path(scc)
} else {
match abs_path.strip_prefix(work_dir) {
Ok(ref p) if p.is_relative() => {
path2cstr(&Path::new(".").join(p))
}
}
- _ => fallback_path(cx)
+ _ => fallback_path(scc)
}
}
}
let split_name = "\0";
return unsafe {
llvm::LLVMDIBuilderCreateCompileUnit(
- debug_context(cx).builder,
+ debug_context.builder,
DW_LANG_RUST,
compile_unit_name,
work_dir.as_ptr(),
producer.as_ptr(),
- cx.sess().opts.optimize != config::OptLevel::No,
+ sess.opts.optimize != config::OptLevel::No,
flags.as_ptr() as *const _,
0,
split_name.as_ptr() as *const _)
};
- fn fallback_path(cx: &CrateContext) -> CString {
- CString::new(cx.link_meta().crate_name.clone()).unwrap()
+ fn fallback_path(scc: &SharedCrateContext) -> CString {
+ CString::new(scc.link_meta().crate_name.clone()).unwrap()
}
}
DIB(cx),
containing_scope,
name.as_ptr(),
- NO_FILE_METADATA,
+ file_metadata,
UNKNOWN_LINE_NUMBER,
bytes_to_bits(discriminant_size),
bytes_to_bits(discriminant_align),
DIB(cx),
composite_type_metadata,
member_name.as_ptr(),
- NO_FILE_METADATA,
+ unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
bytes_to_bits(member_size),
bytes_to_bits(member_align),
DIB(cx),
containing_scope,
name.as_ptr(),
- NO_FILE_METADATA,
+ unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
bytes_to_bits(struct_size),
bytes_to_bits(struct_align),
// crate should already contain debuginfo for it. More importantly, the
// global might not even exist in un-inlined form anywhere which would lead
// to a linker errors.
- if cx.external_srcs().borrow().contains_key(&node_id) {
+ if cx.tcx().map.is_inlined(node_id) {
return;
}
let loc = span_start(cx, span);
(file_metadata(cx, &loc.file.name, &loc.file.abs_path), loc.line as c_uint)
} else {
- (NO_FILE_METADATA, UNKNOWN_LINE_NUMBER)
+ (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
};
let is_local_to_unit = is_node_local_to_unit(cx, node_id);
use self::namespace::mangled_name_of_item;
use self::type_names::compute_debuginfo_type_name;
use self::metadata::{type_metadata, diverging_type_metadata};
-use self::metadata::{file_metadata, scope_metadata, TypeMap, compile_unit_metadata};
+use self::metadata::{file_metadata, scope_metadata, TypeMap};
use self::source_loc::InternalDebugLocation::{self, UnknownLocation};
use llvm;
mod utils;
mod namespace;
mod type_names;
-mod metadata;
+pub mod metadata;
mod create_scope_map;
mod source_loc;
}
debug!("finalize");
- let _ = compile_unit_metadata(cx);
if gdb::needs_gdb_debug_scripts_section(cx) {
// Add a .debug_gdb_scripts section to this compile-unit. This will
});
// Try to get some span information, if we have an inlined item.
- let definition_span = match cx.external().borrow().get(&instance.def) {
- Some(&Some(node_id)) => cx.tcx().map.span(node_id),
- _ => cx.tcx().map.def_id_span(instance.def, syntax_pos::DUMMY_SP)
- };
+ let definition_span = cx.tcx()
+ .map
+ .def_id_span(instance.def, syntax_pos::DUMMY_SP);
(containing_scope, definition_span)
}
// Namespace Handling.
-use super::metadata::{file_metadata, NO_FILE_METADATA, UNKNOWN_LINE_NUMBER};
+use super::metadata::{file_metadata, unknown_file_metadata, UNKNOWN_LINE_NUMBER};
use super::utils::{DIB, debug_context, span_start};
use llvm;
let loc = span_start(ccx, span);
(file_metadata(ccx, &loc.file.name, &loc.file.abs_path), loc.line as c_uint)
} else {
- (NO_FILE_METADATA, UNKNOWN_LINE_NUMBER)
+ (unknown_file_metadata(ccx), UNKNOWN_LINE_NUMBER)
};
let scope = unsafe {
});
// Try to get some span information, if we have an inlined item.
- let definition_span = match cx.external().borrow().get(&def_id) {
- Some(&Some(node_id)) => cx.tcx().map.span(node_id),
- _ => cx.tcx().map.def_id_span(def_id, syntax_pos::DUMMY_SP)
- };
+ let definition_span = cx.tcx().map.def_id_span(def_id, syntax_pos::DUMMY_SP);
(containing_scope, definition_span)
}
Falling back to on-demand instantiation.",
g,
TransItem::DropGlue(g).to_raw_string(),
- ccx.codegen_unit().name);
+ ccx.codegen_unit().name());
ccx.stats().n_fallback_instantiations.set(ccx.stats()
.n_fallback_instantiations
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use middle::cstore::{FoundAst, InlinedItem};
use rustc::hir::def_id::DefId;
use base::push_ctxt;
use common::*;
use monomorphize::Instance;
use rustc::dep_graph::DepNode;
-use rustc::hir;
fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
debug!("instantiate_inline({:?})", fn_id);
let tcx = ccx.tcx();
let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id));
- match ccx.external().borrow().get(&fn_id) {
- Some(&Some(node_id)) => {
- // Already inline
- debug!("instantiate_inline({}): already inline as node id {}",
- tcx.item_path_str(fn_id), node_id);
- let node_def_id = tcx.map.local_def_id(node_id);
- return Some(node_def_id);
- }
- Some(&None) => {
- return None; // Not inlinable
- }
- None => {
- // Not seen yet
- }
- }
-
- let inlined = tcx.sess.cstore.maybe_get_item_ast(tcx, fn_id);
- let inline_id = match inlined {
- FoundAst::NotFound => {
- ccx.external().borrow_mut().insert(fn_id, None);
- return None;
- }
- FoundAst::Found(&InlinedItem::Item(ref item)) => {
- ccx.external().borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
-
- ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-
- item.id
- }
- FoundAst::Found(&InlinedItem::Foreign(ref item)) => {
- ccx.external().borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
- item.id
- }
- FoundAst::FoundParent(parent_id, item) => {
- ccx.external().borrow_mut().insert(parent_id, Some(item.id));
- ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
-
- let mut my_id = 0;
- match item.node {
- hir::ItemEnum(ref ast_def, _) => {
- let ast_vs = &ast_def.variants;
- let ty_vs = &tcx.lookup_adt_def(parent_id).variants;
- assert_eq!(ast_vs.len(), ty_vs.len());
- for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) {
- if ty_v.did == fn_id { my_id = ast_v.node.data.id(); }
- ccx.external().borrow_mut().insert(ty_v.did, Some(ast_v.node.data.id()));
- ccx.external_srcs().borrow_mut().insert(ast_v.node.data.id(), ty_v.did);
- }
- }
- hir::ItemStruct(ref struct_def, _) => {
- if struct_def.is_struct() {
- bug!("instantiate_inline: called on a \
- non-tuple struct")
- } else {
- ccx.external().borrow_mut().insert(fn_id, Some(struct_def.id()));
- ccx.external_srcs().borrow_mut().insert(struct_def.id(), fn_id);
- my_id = struct_def.id();
- }
- }
- _ => bug!("instantiate_inline: item has a \
- non-enum, non-struct parent")
- }
- my_id
- }
- FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => {
- ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id));
- ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id);
-
- ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-
- // Associated consts already have to be evaluated in `typeck`, so
- // the logic to do that already exists in `middle`. In order to
- // reuse that code, it needs to be able to look up the traits for
- // inlined items.
- let ty_trait_item = tcx.impl_or_trait_item(fn_id).clone();
- let trait_item_def_id = tcx.map.local_def_id(trait_item.id);
- tcx.impl_or_trait_items.borrow_mut()
- .insert(trait_item_def_id, ty_trait_item);
-
- // If this is a default method, we can't look up the
- // impl type. But we aren't going to translate anyways, so
- // don't.
- trait_item.id
- }
- FoundAst::Found(&InlinedItem::ImplItem(_, ref impl_item)) => {
- ccx.external().borrow_mut().insert(fn_id, Some(impl_item.id));
- ccx.external_srcs().borrow_mut().insert(impl_item.id, fn_id);
-
- ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
-
- impl_item.id
- }
- };
-
- let inline_def_id = tcx.map.local_def_id(inline_id);
- Some(inline_def_id)
+ tcx.sess
+ .cstore
+ .maybe_get_item_ast(tcx, fn_id)
+ .map(|(_, inline_id)| {
+ tcx.map.local_def_id(inline_id)
+ })
}
pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId)
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use adt;
-use attributes;
use base::*;
use build::*;
use callee::{self, Callee};
use type_::Type;
use rustc::ty::{self, Ty};
use Disr;
-use rustc::ty::subst::Substs;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx();
- let tcx = ccx.tcx();
let dloc = DebugLoc::None;
// Translates the shims described above:
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
- attributes::emit_uwtable(bcx.fcx.llfn, true);
- let target = &bcx.sess().target.target;
- let catch_pers = if target.arch == "arm" && target.target_os != "ios" {
- // Only ARM still uses a separate catch personality (for now)
- match tcx.lang_items.eh_personality_catch() {
- Some(did) => {
- Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
- }
- None => bug!("eh_personality_catch not defined"),
- }
- } else {
- bcx.fcx.eh_personality()
- };
-
let then = bcx.fcx.new_temp_block("then");
let catch = bcx.fcx.new_temp_block("catch");
// rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
- let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
+ let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1);
AddClause(catch, vals, C_null(Type::i8p(ccx)));
let ptr = ExtractValue(catch, vals, 0);
Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
#![feature(unicode)]
#![feature(question_mark)]
+use rustc::dep_graph::WorkProduct;
+
extern crate arena;
extern crate flate;
extern crate getopts;
mod abi;
mod adt;
mod asm;
+mod assert_module_sources;
mod attributes;
mod base;
mod basic_block;
#[derive(Clone)]
pub struct ModuleTranslation {
+ /// The name of the module. When the crate may be saved between
+ /// compilations, incremental compilation requires that the name be
+ /// unique amongst **all** crates. Therefore, it should contain
+ /// something unique to this crate (e.g., a module path) as well
+ /// as the crate name and disambiguator.
pub name: String,
+ pub symbol_name_hash: u64,
+ pub source: ModuleSource,
+}
+
+#[derive(Clone)]
+pub enum ModuleSource {
+ /// Copy the `.o` files or whatever from the incr. comp. directory.
+ Preexisting(WorkProduct),
+
+ /// Rebuild from this LLVM module.
+ Translated(ModuleLlvm),
+}
+
+#[derive(Copy, Clone)]
+pub struct ModuleLlvm {
pub llcx: llvm::ContextRef,
pub llmod: llvm::ModuleRef,
}
// After this point, bcx is the block for the call to panic.
bcx = panic_block.build();
+ debug_loc.apply_to_bcx(&bcx);
// Get the location information.
let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
// FIXME Shouldn't need to manually trigger closure instantiations.
if let mir::AggregateKind::Closure(def_id, substs) = *kind {
- use rustc::hir;
- use syntax::ast::DUMMY_NODE_ID;
- use syntax::ptr::P;
use closure;
-
- closure::trans_closure_expr(closure::Dest::Ignore(self.ccx),
- &hir::FnDecl {
- inputs: P::new(),
- output: hir::NoReturn(DUMMY_SP),
- variadic: false
- },
- &hir::Block {
- stmts: P::new(),
- expr: None,
- id: DUMMY_NODE_ID,
- rules: hir::DefaultBlock,
- span: DUMMY_SP
- },
- DUMMY_NODE_ID, def_id,
- self.monomorphize(&substs));
+ closure::trans_closure_body_via_mir(self.ccx,
+ def_id,
+ self.monomorphize(&substs));
}
let val = if let mir::AggregateKind::Adt(adt_def, index, _) = *kind {
_ => {
// FIXME Shouldn't need to manually trigger closure instantiations.
if let mir::AggregateKind::Closure(def_id, substs) = *kind {
- use rustc::hir;
- use syntax::ast::DUMMY_NODE_ID;
- use syntax::ptr::P;
- use syntax_pos::DUMMY_SP;
use closure;
- closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()),
- &hir::FnDecl {
- inputs: P::new(),
- output: hir::NoReturn(DUMMY_SP),
- variadic: false
- },
- &hir::Block {
- stmts: P::new(),
- expr: None,
- id: DUMMY_NODE_ID,
- rules: hir::DefaultBlock,
- span: DUMMY_SP
- },
- DUMMY_NODE_ID, def_id,
- bcx.monomorphize(&substs));
+ closure::trans_closure_body_via_mir(bcx.ccx(),
+ def_id,
+ bcx.monomorphize(&substs));
}
for (i, operand) in operands.iter().enumerate() {
}
}
}
- mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
+ mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
+ let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
+ let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
+ if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
+ if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
+ let ll_cft = ll_cast_ty.field_types();
+ let ll_fft = ll_from_ty.field_types();
+ let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
+ assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
+ OperandValue::Pair(data_cast, meta_ptr)
+ } else { // cast to thin-ptr
+ // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
+ // pointer-cast of that pointer to desired pointer type.
+ let llval = bcx.pointercast(data_ptr, ll_cast_ty);
+ OperandValue::Immediate(llval)
+ }
+ } else {
+ bug!("Unexpected non-Pair operand")
+ }
+ }
+ mir::CastKind::Misc => {
debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
- let llval = operand.immediate();
- let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
+ let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let repr = adt::represent_type(bcx.ccx(), operand.ty);
- adt::is_discr_signed(&repr)
+ let discr = match operand.val {
+ OperandValue::Immediate(llval) => llval,
+ OperandValue::Ref(llptr) => {
+ bcx.with_block(|bcx| {
+ adt::trans_get_discr(bcx, &repr, llptr, None, true)
+ })
+ }
+ OperandValue::Pair(..) => bug!("Unexpected Pair operand")
+ };
+ (discr, adt::is_discr_signed(&repr))
} else {
- operand.ty.is_signed()
+ (operand.immediate(), operand.ty.is_signed())
};
let newval = match (r_t_in, r_t_out) {
};
OperandValue::Immediate(newval)
}
- mir::CastKind::Misc => { // Casts from a fat-ptr.
- let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
- let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
- if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
- if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
- let ll_cft = ll_cast_ty.field_types();
- let ll_fft = ll_from_ty.field_types();
- let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
- assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
- OperandValue::Pair(data_cast, meta_ptr)
- } else { // cast to thin-ptr
- // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
- // pointer-cast of that pointer to desired pointer type.
- let llval = bcx.pointercast(data_ptr, ll_cast_ty);
- OperandValue::Immediate(llval)
- }
- } else {
- bug!("Unexpected non-Pair operand")
- }
- }
};
let operand = OperandRef {
val: val,
debug!("leaving monomorphic fn {:?}", instance);
return (val, mono_ty);
} else {
- assert!(!ccx.codegen_unit().items.contains_key(&TransItem::Fn(instance)));
+ assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance)));
}
debug!("monomorphic_fn({:?})", instance);
use collector::InliningMap;
use llvm;
use monomorphize;
+use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER;
use rustc::ty::TyCtxt;
use rustc::ty::item_path::characteristic_def_id_of_type;
use std::cmp::Ordering;
+use std::hash::{Hash, Hasher, SipHasher};
+use std::sync::Arc;
use symbol_map::SymbolMap;
use syntax::ast::NodeId;
use syntax::parse::token::{self, InternedString};
}
pub struct CodegenUnit<'tcx> {
- pub name: InternedString,
- pub items: FnvHashMap<TransItem<'tcx>, llvm::Linkage>,
+ /// A name for this CGU. Incremental compilation requires that the
+ /// name be unique amongst **all** crates. Therefore, it should
+ /// contain something unique to this crate (e.g., a module path)
+ /// as well as the crate name and disambiguator.
+ name: InternedString,
+
+ items: FnvHashMap<TransItem<'tcx>, llvm::Linkage>,
}
impl<'tcx> CodegenUnit<'tcx> {
+ pub fn new(name: InternedString,
+ items: FnvHashMap<TransItem<'tcx>, llvm::Linkage>)
+ -> Self {
+ CodegenUnit {
+ name: name,
+ items: items,
+ }
+ }
+
+ pub fn empty(name: InternedString) -> Self {
+ Self::new(name, FnvHashMap())
+ }
+
+ pub fn contains_item(&self, item: &TransItem<'tcx>) -> bool {
+ self.items.contains_key(item)
+ }
+
+ pub fn name(&self) -> &str {
+ &self.name
+ }
+
+ pub fn items(&self) -> &FnvHashMap<TransItem<'tcx>, llvm::Linkage> {
+ &self.items
+ }
+
+ pub fn work_product_id(&self) -> Arc<WorkProductId> {
+ Arc::new(WorkProductId(self.name().to_string()))
+ }
+
+ pub fn work_product_dep_node(&self) -> DepNode<DefId> {
+ DepNode::WorkProduct(self.work_product_id())
+ }
+
+ pub fn compute_symbol_name_hash(&self, tcx: TyCtxt, symbol_map: &SymbolMap) -> u64 {
+ let mut state = SipHasher::new();
+ let all_items = self.items_in_deterministic_order(tcx, symbol_map);
+ for (item, _) in all_items {
+ let symbol_name = symbol_map.get(item).unwrap();
+ symbol_name.hash(&mut state);
+ }
+ state.finish()
+ }
+
pub fn items_in_deterministic_order(&self,
tcx: TyCtxt,
symbol_map: &SymbolMap)
};
let make_codegen_unit = || {
- CodegenUnit {
- name: codegen_unit_name.clone(),
- items: FnvHashMap(),
- }
+ CodegenUnit::empty(codegen_unit_name.clone())
};
let mut codegen_unit = codegen_units.entry(codegen_unit_name.clone())
if codegen_units.is_empty() {
let codegen_unit_name = InternedString::new(FALLBACK_CODEGEN_UNIT);
codegen_units.entry(codegen_unit_name.clone())
- .or_insert_with(|| CodegenUnit {
- name: codegen_unit_name.clone(),
- items: FnvHashMap(),
- });
+ .or_insert_with(|| CodegenUnit::empty(codegen_unit_name.clone()));
}
PreInliningPartitioning {
// we reach the target count
while codegen_units.len() < target_cgu_count {
let index = codegen_units.len();
- codegen_units.push(CodegenUnit {
- name: numbered_codegen_unit_name(crate_name, index),
- items: FnvHashMap()
- });
+ codegen_units.push(
+ CodegenUnit::empty(numbered_codegen_unit_name(crate_name, index)));
}
}
follow_inlining(*root, inlining_map, &mut reachable);
}
- let mut new_codegen_unit = CodegenUnit {
- name: codegen_unit.name.clone(),
- items: FnvHashMap(),
- };
+ let mut new_codegen_unit =
+ CodegenUnit::empty(codegen_unit.name.clone());
// Add all translation items that are not already there
for trans_item in reachable {
items.insert(trans_item, linkage);
}
- CodegenUnit {
- name: numbered_codegen_unit_name(&tcx.crate_name[..], 0),
- items: items
- }
+ CodegenUnit::new(
+ numbered_codegen_unit_name(&tcx.crate_name[..], 0),
+ items)
}
fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString {
use llvm;
use monomorphize::{self, Instance};
use inline;
+use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::map as hir_map;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::subst;
-use rustc::dep_graph::DepNode;
use rustc_const_eval::fatal_const_eval_err;
use std::hash::{Hash, Hasher};
use syntax::ast::{self, NodeId};
impl<'a, 'tcx> TransItem<'tcx> {
pub fn define(&self, ccx: &CrateContext<'a, 'tcx>) {
-
debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(ccx.tcx()),
self.to_raw_string(),
- ccx.codegen_unit().name);
+ ccx.codegen_unit().name());
+
+ // (*) This code executes in the context of a dep-node for the
+ // entire CGU. In some cases, we introduce dep-nodes for
+ // particular items that we are translating (these nodes will
+ // have read edges coming into the CGU node). These smaller
+ // nodes are not needed for correctness -- we always
+ // invalidate an entire CGU at a time -- but they enable
+ // finer-grained testing, since you can write tests that check
+ // that the incoming edges to a particular fn are from a
+ // particular set.
self.register_reads(ccx);
match *self {
TransItem::Static(node_id) => {
+ let def_id = ccx.tcx().map.local_def_id(node_id);
+ let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*)
let item = ccx.tcx().map.expect_item(node_id);
if let hir::ItemStatic(_, m, ref expr) = item.node {
match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) {
}
}
TransItem::Fn(instance) => {
+ let _task = ccx.tcx().dep_graph.in_task(
+ DepNode::TransCrateItem(instance.def)); // (*)
+
base::trans_instance(&ccx, instance);
}
TransItem::DropGlue(dg) => {
debug!("END IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(ccx.tcx()),
self.to_raw_string(),
- ccx.codegen_unit().name);
+ ccx.codegen_unit().name());
}
/// If necessary, creates a subtask for trans'ing a particular item and registers reads on
debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
self.to_string(ccx.tcx()),
self.to_raw_string(),
- ccx.codegen_unit().name);
+ ccx.codegen_unit().name());
let symbol_name = ccx.symbol_map()
.get_or_compute(ccx.shared(), *self);
debug!("END PREDEFINING '{} ({})' in cgu {}",
self.to_string(ccx.tcx()),
self.to_raw_string(),
- ccx.codegen_unit().name);
+ ccx.codegen_unit().name());
}
fn predefine_static(ccx: &CrateContext<'a, 'tcx>,
// item is declared.
let bound = match (&ty.sty, ty_path_def) {
(_, Def::SelfTy(Some(trait_did), Some(impl_id))) => {
+ // For Def::SelfTy() values inlined from another crate, the
+ // impl_id will be DUMMY_NODE_ID, which would cause problems
+ // here. But we should never run into an impl from another crate
+ // in this pass.
+ assert!(impl_id != ast::DUMMY_NODE_ID);
+
// `Self` in an impl of a trait - we have a concrete self type and a
// trait reference.
let trait_ref = tcx.impl_trait_ref(tcx.map.local_def_id(impl_id)).unwrap();
}
Def::SelfTy(_, Some(impl_id)) => {
// Self in impl (we know the concrete type).
+
+ // For Def::SelfTy() values inlined from another crate, the
+ // impl_id will be DUMMY_NODE_ID, which would cause problems
+ // here. But we should never run into an impl from another crate
+ // in this pass.
+ assert!(impl_id != ast::DUMMY_NODE_ID);
+
tcx.prohibit_type_params(base_segments);
let ty = tcx.node_id_to_type(impl_id);
if let Some(free_substs) = self.get_free_substs() {
/// to `trait_id` (this only cares about the trait, not the specific
/// method that is called)
pub fn check_legal_trait_for_method_call(ccx: &CrateCtxt, span: Span, trait_id: DefId) {
- let tcx = ccx.tcx;
- let did = Some(trait_id);
- let li = &tcx.lang_items;
-
- if did == li.drop_trait() {
- span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
- } else if !tcx.sess.features.borrow().unboxed_closures {
- // the #[feature(unboxed_closures)] feature isn't
- // activated so we need to enforce the closure
- // restrictions.
-
- let method = if did == li.fn_trait() {
- "call"
- } else if did == li.fn_mut_trait() {
- "call_mut"
- } else if did == li.fn_once_trait() {
- "call_once"
- } else {
- return // not a closure method, everything is OK.
- };
-
- struct_span_err!(tcx.sess, span, E0174,
- "explicit use of unboxed closure method `{}` is experimental",
- method)
- .help("add `#![feature(unboxed_closures)]` to the crate \
- attributes to enable")
- .emit();
+ if ccx.tcx.lang_items.drop_trait() == Some(trait_id) {
+ span_err!(ccx.tcx.sess, span, E0040, "explicit use of destructor method");
}
}
use middle::mem_categorization::Categorization;
use rustc::ty::{self, Ty};
use rustc::infer::UpvarRegion;
-use std::collections::HashSet;
use syntax::ast;
use syntax_pos::Span;
use rustc::hir;
use rustc::hir::intravisit::{self, Visitor};
+use rustc::util::nodemap::NodeMap;
///////////////////////////////////////////////////////////////////////////
// PUBLIC ENTRY POINTS
pub fn closure_analyze_fn(&self, body: &hir::Block) {
let mut seed = SeedBorrowKind::new(self);
seed.visit_block(body);
- let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
- let mut adjust = AdjustBorrowKind::new(self, &closures_with_inferred_kinds);
+ let mut adjust = AdjustBorrowKind::new(self, seed.temp_closure_kinds);
adjust.visit_block(body);
// it's our job to process these.
pub fn closure_analyze_const(&self, body: &hir::Expr) {
let mut seed = SeedBorrowKind::new(self);
seed.visit_expr(body);
- let closures_with_inferred_kinds = seed.closures_with_inferred_kinds;
- let mut adjust = AdjustBorrowKind::new(self, &closures_with_inferred_kinds);
+ let mut adjust = AdjustBorrowKind::new(self, seed.temp_closure_kinds);
adjust.visit_expr(body);
// it's our job to process these.
struct SeedBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- closures_with_inferred_kinds: HashSet<ast::NodeId>,
+ temp_closure_kinds: NodeMap<ty::ClosureKind>,
}
impl<'a, 'gcx, 'tcx, 'v> Visitor<'v> for SeedBorrowKind<'a, 'gcx, 'tcx> {
impl<'a, 'gcx, 'tcx> SeedBorrowKind<'a, 'gcx, 'tcx> {
fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>) -> SeedBorrowKind<'a, 'gcx, 'tcx> {
- SeedBorrowKind { fcx: fcx, closures_with_inferred_kinds: HashSet::new() }
+ SeedBorrowKind { fcx: fcx, temp_closure_kinds: NodeMap() }
}
fn check_closure(&mut self,
{
let closure_def_id = self.fcx.tcx.map.local_def_id(expr.id);
if !self.fcx.tables.borrow().closure_kinds.contains_key(&closure_def_id) {
- self.closures_with_inferred_kinds.insert(expr.id);
- self.fcx.tables.borrow_mut().closure_kinds
- .insert(closure_def_id, ty::ClosureKind::Fn);
- debug!("check_closure: adding closure_id={:?} to closures_with_inferred_kinds",
- closure_def_id);
+ self.temp_closure_kinds.insert(expr.id, ty::ClosureKind::Fn);
+ debug!("check_closure: adding closure {:?} as Fn", expr.id);
}
self.fcx.tcx.with_freevars(expr.id, |freevars| {
struct AdjustBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- closures_with_inferred_kinds: &'a HashSet<ast::NodeId>,
+ temp_closure_kinds: NodeMap<ty::ClosureKind>,
}
impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> {
fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- closures_with_inferred_kinds: &'a HashSet<ast::NodeId>)
+ temp_closure_kinds: NodeMap<ty::ClosureKind>)
-> AdjustBorrowKind<'a, 'gcx, 'tcx> {
- AdjustBorrowKind { fcx: fcx, closures_with_inferred_kinds: closures_with_inferred_kinds }
+ AdjustBorrowKind { fcx: fcx, temp_closure_kinds: temp_closure_kinds }
}
fn analyze_closure(&mut self,
debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id);
{
- let mut euv = euv::ExprUseVisitor::new(self, self.fcx);
+ let mut euv =
+ euv::ExprUseVisitor::with_options(self,
+ self.fcx,
+ mc::MemCategorizationOptions {
+ during_closure_kind_inference: true
+ });
euv.walk_fn(decl, body);
}
self.fcx.demand_eqtype(span, final_upvar_ty, upvar_ty);
}
- // Now we must process and remove any deferred resolutions,
- // since we have a concrete closure kind.
+ // If we have also inferred the closure kind here, update the
+ // main table and process any deferred resolutions.
let closure_def_id = self.fcx.tcx.map.local_def_id(id);
- if self.closures_with_inferred_kinds.contains(&id) {
+ if let Some(&kind) = self.temp_closure_kinds.get(&id) {
+ self.fcx.tables.borrow_mut().closure_kinds
+ .insert(closure_def_id, kind);
+ debug!("closure_kind({:?}) = {:?}", closure_def_id, kind);
+
let mut deferred_call_resolutions =
self.fcx.remove_deferred_call_resolutions(closure_def_id);
for deferred_call_resolution in &mut deferred_call_resolutions {
})
}
- fn adjust_upvar_borrow_kind_for_consume(&self,
+ fn adjust_upvar_borrow_kind_for_consume(&mut self,
cmt: mc::cmt<'tcx>,
mode: euv::ConsumeMode)
{
}
}
- fn adjust_upvar_borrow_kind_for_unique(&self, cmt: mc::cmt<'tcx>) {
+ fn adjust_upvar_borrow_kind_for_unique(&mut self, cmt: mc::cmt<'tcx>) {
debug!("adjust_upvar_borrow_kind_for_unique(cmt={:?})",
cmt);
}
}
- fn try_adjust_upvar_deref(&self,
+ fn try_adjust_upvar_deref(&mut self,
note: &mc::Note,
borrow_kind: ty::BorrowKind)
-> bool
/// moving from left to right as needed (but never right to left).
/// Here the argument `mutbl` is the borrow_kind that is required by
/// some particular use.
- fn adjust_upvar_borrow_kind(&self,
+ fn adjust_upvar_borrow_kind(&mut self,
upvar_id: ty::UpvarId,
upvar_capture: &mut ty::UpvarCapture,
kind: ty::BorrowKind) {
}
}
- fn adjust_closure_kind(&self,
+ fn adjust_closure_kind(&mut self,
closure_id: ast::NodeId,
new_kind: ty::ClosureKind) {
debug!("adjust_closure_kind(closure_id={}, new_kind={:?})",
closure_id, new_kind);
- if !self.closures_with_inferred_kinds.contains(&closure_id) {
- return;
- }
-
- let closure_def_id = self.fcx.tcx.map.local_def_id(closure_id);
- let closure_kinds = &mut self.fcx.tables.borrow_mut().closure_kinds;
- let existing_kind = *closure_kinds.get(&closure_def_id).unwrap();
+ if let Some(&existing_kind) = self.temp_closure_kinds.get(&closure_id) {
+ debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}",
+ closure_id, existing_kind, new_kind);
- debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}",
- closure_id, existing_kind, new_kind);
-
- match (existing_kind, new_kind) {
- (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
- (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) |
- (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
- (ty::ClosureKind::FnOnce, _) => {
- // no change needed
- }
+ match (existing_kind, new_kind) {
+ (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
+ (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) |
+ (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
+ (ty::ClosureKind::FnOnce, _) => {
+ // no change needed
+ }
- (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) |
- (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
- (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
- // new kind is stronger than the old kind
- closure_kinds.insert(closure_def_id, new_kind);
+ (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) |
+ (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
+ (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+ // new kind is stronger than the old kind
+ self.temp_closure_kinds.insert(closure_id, new_kind);
+ }
}
}
}
span: Span,
id: ast::NodeId)
{
- intravisit::walk_fn(self, fn_kind, decl, body, span);
+ intravisit::walk_fn(self, fn_kind, decl, body, span, id);
self.analyze_closure(id, span, decl, body);
}
}
item: &hir::Item,
ast_generics: &hir::Generics)
{
+ let ty = self.tcx().node_id_to_type(item.id);
+ if self.tcx().has_error_field(ty) {
+ return;
+ }
+
let item_def_id = self.tcx().map.local_def_id(item.id);
let ty_predicates = self.tcx().lookup_predicates(item_def_id);
let variances = self.tcx().item_variances(item_def_id);
// reachable from there, to start (if this is an inherent impl,
// then just examine the self type).
let mut input_parameters: HashSet<_> =
- ctp::parameters_for_type(impl_scheme.ty, false).into_iter().collect();
+ ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect();
if let Some(ref trait_ref) = impl_trait_ref {
- input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false));
+ input_parameters.extend(ctp::parameters_for(trait_ref, false));
}
ctp::setup_constraining_predicates(impl_predicates.predicates.get_mut_slice(TypeSpace),
let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id);
let mut input_parameters: HashSet<_> =
- ctp::parameters_for_type(impl_scheme.ty, false).into_iter().collect();
+ ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect();
if let Some(ref trait_ref) = impl_trait_ref {
- input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false));
+ input_parameters.extend(ctp::parameters_for(trait_ref, false));
}
ctp::identify_constrained_type_params(
&impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters);
ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty,
ty::ConstTraitItem(..) | ty::MethodTraitItem(..) => None
})
- .flat_map(|ty| ctp::parameters_for_type(ty, true))
+ .flat_map(|ty| ctp::parameters_for(&ty, true))
.filter_map(|p| match p {
ctp::Parameter::Type(_) => None,
ctp::Parameter::Region(r) => Some(r),
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rustc::ty::{self, subst, Ty};
-
+use rustc::ty::{self, Ty};
+use rustc::ty::fold::{TypeFoldable, TypeVisitor};
use std::collections::HashSet;
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
}
/// If `include_projections` is false, returns the list of parameters that are
-/// constrained by the type `ty` - i.e. the value of each parameter in the list is
-/// uniquely determined by `ty` (see RFC 447). If it is true, return the list
+/// constrained by `t` - i.e. the value of each parameter in the list is
+/// uniquely determined by `t` (see RFC 447). If it is true, return the list
/// of parameters whose values are needed in order to constrain `ty` - these
/// differ, with the latter being a superset, in the presence of projections.
-pub fn parameters_for_type<'tcx>(ty: Ty<'tcx>,
- include_projections: bool) -> Vec<Parameter> {
- let mut result = vec![];
- ty.maybe_walk(|t| match t.sty {
- ty::TyProjection(..) if !include_projections => {
+pub fn parameters_for<'tcx, T>(t: &T,
+ include_nonconstraining: bool)
+ -> Vec<Parameter>
+ where T: TypeFoldable<'tcx>
+{
- false // projections are not injective.
- }
- _ => {
- result.append(&mut parameters_for_type_shallow(t));
- // non-projection type constructors are injective.
- true
- }
- });
- result
+ let mut collector = ParameterCollector {
+ parameters: vec![],
+ include_nonconstraining: include_nonconstraining
+ };
+ t.visit_with(&mut collector);
+ collector.parameters
}
-pub fn parameters_for_trait_ref<'tcx>(trait_ref: &ty::TraitRef<'tcx>,
- include_projections: bool) -> Vec<Parameter> {
- let mut region_parameters =
- parameters_for_regions_in_substs(&trait_ref.substs);
-
- let type_parameters =
- trait_ref.substs
- .types
- .iter()
- .flat_map(|ty| parameters_for_type(ty, include_projections));
-
- region_parameters.extend(type_parameters);
-
- region_parameters
+struct ParameterCollector {
+ parameters: Vec<Parameter>,
+ include_nonconstraining: bool
}
-fn parameters_for_type_shallow<'tcx>(ty: Ty<'tcx>) -> Vec<Parameter> {
- match ty.sty {
- ty::TyParam(ref d) =>
- vec![Parameter::Type(d.clone())],
- ty::TyRef(region, _) =>
- parameters_for_region(region).into_iter().collect(),
- ty::TyStruct(_, substs) |
- ty::TyEnum(_, substs) =>
- parameters_for_regions_in_substs(substs),
- ty::TyTrait(ref data) =>
- parameters_for_regions_in_substs(&data.principal.skip_binder().substs),
- ty::TyProjection(ref pi) =>
- parameters_for_regions_in_substs(&pi.trait_ref.substs),
- ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
- ty::TyFloat(..) | ty::TyBox(..) | ty::TyStr |
- ty::TyArray(..) | ty::TySlice(..) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) |
- ty::TyTuple(..) | ty::TyRawPtr(..) |
- ty::TyInfer(..) | ty::TyClosure(..) | ty::TyError =>
- vec![]
- }
-}
+impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
+ match t.sty {
+ ty::TyProjection(..) if !self.include_nonconstraining => {
+ // projections are not injective
+ return false;
+ }
+ ty::TyParam(ref d) => {
+ self.parameters.push(Parameter::Type(d.clone()));
+ }
+ _ => {}
+ }
-fn parameters_for_regions_in_substs(substs: &subst::Substs) -> Vec<Parameter> {
- substs.regions
- .iter()
- .filter_map(|r| parameters_for_region(r))
- .collect()
-}
+ t.super_visit_with(self)
+ }
-fn parameters_for_region(region: &ty::Region) -> Option<Parameter> {
- match *region {
- ty::ReEarlyBound(data) => Some(Parameter::Region(data)),
- _ => None,
+ fn visit_region(&mut self, r: ty::Region) -> bool {
+ match r {
+ ty::ReEarlyBound(data) => {
+ self.parameters.push(Parameter::Region(data));
+ }
+ _ => {}
+ }
+ false
}
}
// Then the projection only applies if `T` is known, but it still
// does not determine `U`.
- let inputs = parameters_for_trait_ref(&projection.projection_ty.trait_ref, true);
+ let inputs = parameters_for(&projection.projection_ty.trait_ref, true);
let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p));
if !relies_only_on_inputs {
continue;
}
- input_parameters.extend(parameters_for_type(projection.ty, false));
+ input_parameters.extend(parameters_for(&projection.ty, false));
} else {
continue;
}
https://doc.rust-lang.org/book/traits.html
"##,
-E0174: r##"
-This error occurs because of the explicit use of unboxed closure methods
-that are an experimental feature in current Rust version.
-
-Example of erroneous code:
-
-```compile_fail
-fn foo<F: Fn(&str)>(mut f: F) {
- f.call(("call",));
- // error: explicit use of unboxed closure method `call`
- f.call_mut(("call_mut",));
- // error: explicit use of unboxed closure method `call_mut`
- f.call_once(("call_once",));
- // error: explicit use of unboxed closure method `call_once`
-}
-
-fn bar(text: &str) {
- println!("Calling {} it works!", text);
-}
-
-fn main() {
- foo(bar);
-}
-```
-
-Rust's implementation of closures is a bit different than other languages.
-They are effectively syntax sugar for traits `Fn`, `FnMut` and `FnOnce`.
-To understand better how the closures are implemented see here:
-https://doc.rust-lang.org/book/closures.html#closure-implementation
-
-To fix this you can call them using parenthesis, like this: `foo()`.
-When you execute the closure with parenthesis, under the hood you are executing
-the method `call`, `call_mut` or `call_once`. However, using them explicitly is
-currently an experimental feature.
-
-Example of an implicit call:
-
-```
-fn foo<F: Fn(&str)>(f: F) {
- f("using ()"); // Calling using () it works!
-}
-
-fn bar(text: &str) {
- println!("Calling {} it works!", text);
-}
-
-fn main() {
- foo(bar);
-}
-```
-
-To enable the explicit calls you need to add `#![feature(unboxed_closures)]`.
-
-This feature is still unstable so you will also need to add
-`#![feature(fn_traits)]`.
-More details about this issue here:
-https://github.com/rust-lang/rust/issues/29625
-
-Example of use:
-
-```
-#![feature(fn_traits)]
-#![feature(unboxed_closures)]
-
-fn foo<F: Fn(&str)>(mut f: F) {
- f.call(("call",)); // Calling 'call' it works!
- f.call_mut(("call_mut",)); // Calling 'call_mut' it works!
- f.call_once(("call_once",)); // Calling 'call_once' it works!
-}
-
-fn bar(text: &str) {
- println!("Calling '{}' it works!", text);
-}
-
-fn main() {
- foo(bar);
-}
-```
-
-To see more about closures take a look here:
-https://doc.rust-lang.org/book/closures.html`
-"##,
-
E0178: r##"
In types, the `+` type operator has low precedence, so it is often necessary
to use parentheses.
E0167,
// E0168,
// E0173, // manual implementations of unboxed closure traits are experimental
+// E0174,
E0182,
E0183,
// E0187, // can't infer the kind of the closure
[build-dependencies]
build_helper = { path = "../build_helper" }
-gcc = "0.3"
+gcc = "0.3.27"
impl Clean<Attribute> for ast::MetaItem {
fn clean(&self, cx: &DocContext) -> Attribute {
- match self.node {
- ast::MetaItemKind::Word(ref s) => Word(s.to_string()),
- ast::MetaItemKind::List(ref s, ref l) => {
- List(s.to_string(), l.clean(cx))
- }
- ast::MetaItemKind::NameValue(ref s, ref v) => {
- NameValue(s.to_string(), lit_to_string(v))
- }
- }
+ if self.is_word() {
+ Word(self.name().to_string())
+ } else if let Some(v) = self.value_str() {
+ NameValue(self.name().to_string(), v.to_string())
+ } else { // must be a list
+ let l = self.meta_item_list().unwrap();
+ List(self.name().to_string(), l.clean(cx))
+ }
}
}
impl Clean<Attribute> for ast::Attribute {
fn clean(&self, cx: &DocContext) -> Attribute {
- self.with_desugared_doc(|a| a.node.value.clean(cx))
+ self.with_desugared_doc(|a| a.meta().clean(cx))
}
}
}
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [P<ast::MetaItem>]> { None }
+
+ fn is_word(&self) -> bool {
+ match *self {
+ Word(_) => true,
+ _ => false,
+ }
+ }
+
+ fn is_value_str(&self) -> bool {
+ match *self {
+ NameValue(..) => true,
+ _ => false,
+ }
+ }
+
+ fn is_meta_item_list(&self) -> bool {
+ match *self {
+ List(..) => true,
+ _ => false,
+ }
+ }
+
fn span(&self) -> syntax_pos::Span { unimplemented!() }
}
}
}
-fn lit_to_string(lit: &ast::Lit) -> String {
- match lit.node {
- ast::LitKind::Str(ref st, _) => st.to_string(),
- ast::LitKind::ByteStr(ref data) => format!("{:?}", data),
- ast::LitKind::Byte(b) => {
- let mut res = String::from("b'");
- for c in (b as char).escape_default() {
- res.push(c);
- }
- res.push('\'');
- res
- },
- ast::LitKind::Char(c) => format!("'{}'", c),
- ast::LitKind::Int(i, _t) => i.to_string(),
- ast::LitKind::Float(ref f, _t) => f.to_string(),
- ast::LitKind::FloatUnsuffixed(ref f) => f.to_string(),
- ast::LitKind::Bool(b) => b.to_string(),
- }
-}
-
fn name_from_pat(p: &hir::Pat) -> String {
use rustc::hir::*;
debug!("Trying to get a name from pattern: {:?}", p);
Def::Static(i, _) => (i, TypeStatic),
Def::Variant(i, _) => (i, TypeEnum),
Def::SelfTy(Some(def_id), _) => (def_id, TypeTrait),
- Def::SelfTy(_, Some(impl_id)) => return cx.map.local_def_id(impl_id),
+ Def::SelfTy(_, Some(impl_id)) => {
+ // For Def::SelfTy() values inlined from another crate, the
+ // impl_id will be DUMMY_NODE_ID, which would cause problems.
+ // But we should never run into an impl from another crate here.
+ return cx.map.local_def_id(impl_id)
+ }
_ => return def.def_id()
};
if did.is_local() { return did }
[build-dependencies]
build_helper = { path = "../build_helper" }
-gcc = "0.3"
+gcc = "0.3.27"
[features]
+backtrace = []
jemalloc = ["alloc_jemalloc"]
debug-jemalloc = ["alloc_jemalloc/debug"]
let target = env::var("TARGET").unwrap();
let host = env::var("HOST").unwrap();
- if !target.contains("apple") && !target.contains("msvc") && !target.contains("emscripten"){
+ if cfg!(feature = "backtrace") && !target.contains("apple") && !target.contains("msvc") &&
+ !target.contains("emscripten") {
build_libbacktrace(&host, &target);
}
///
/// This field may not be available on all platforms, and will return an
/// `Err` on platforms where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn foo() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = try!(fs::metadata("foo.txt"));
+ ///
+ /// if let Ok(time) = metadata.modified() {
+ /// println!("{:?}", time);
+ /// } else {
+ /// println!("Not supported on this platform");
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "fs_time", since = "1.10.0")]
pub fn modified(&self) -> io::Result<SystemTime> {
self.0.modified().map(FromInner::from_inner)
///
/// This field may not be available on all platforms, and will return an
/// `Err` on platforms where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn foo() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = try!(fs::metadata("foo.txt"));
+ ///
+ /// if let Ok(time) = metadata.accessed() {
+ /// println!("{:?}", time);
+ /// } else {
+ /// println!("Not supported on this platform");
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "fs_time", since = "1.10.0")]
pub fn accessed(&self) -> io::Result<SystemTime> {
self.0.accessed().map(FromInner::from_inner)
///
/// This field may not be available on all platforms, and will return an
/// `Err` on platforms where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn foo() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = try!(fs::metadata("foo.txt"));
+ ///
+ /// if let Ok(time) = metadata.created() {
+ /// println!("{:?}", time);
+ /// } else {
+ /// println!("Not supported on this platform");
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
#[stable(feature = "fs_time", since = "1.10.0")]
pub fn created(&self) -> io::Result<SystemTime> {
self.0.created().map(FromInner::from_inner)
use mem;
use raw;
use sys_common::rwlock::RWLock;
-use sync::atomic::{AtomicBool, Ordering};
use sys::stdio::Stderr;
-use sys_common::backtrace;
use sys_common::thread_info;
use sys_common::util;
use thread;
static HOOK_LOCK: RWLock = RWLock::new();
static mut HOOK: Hook = Hook::Default;
-static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
/// Registers a custom panic hook, replacing any that was previously registered.
///
}
fn default_hook(info: &PanicInfo) {
- let panics = PANIC_COUNT.with(|c| c.get());
+ #[cfg(any(not(cargobuild), feature = "backtrace"))]
+ use sys_common::backtrace;
// If this is a double panic, make sure that we print a backtrace
// for this panic. Otherwise only print it if logging is enabled.
- let log_backtrace = panics >= 2 || backtrace::log_enabled();
+ #[cfg(any(not(cargobuild), feature = "backtrace"))]
+ let log_backtrace = {
+ let panics = PANIC_COUNT.with(|c| c.get());
+
+ panics >= 2 || backtrace::log_enabled()
+ };
let file = info.location.file;
let line = info.location.line;
let _ = writeln!(err, "thread '{}' panicked at '{}', {}:{}",
name, msg, file, line);
- if log_backtrace {
- let _ = backtrace::write(err);
- } else if FIRST_PANIC.compare_and_swap(true, false, Ordering::SeqCst) {
- let _ = writeln!(err, "note: Run with `RUST_BACKTRACE=1` for a backtrace.");
+ #[cfg(any(not(cargobuild), feature = "backtrace"))]
+ {
+ use sync::atomic::{AtomicBool, Ordering};
+
+ static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
+
+ if log_backtrace {
+ let _ = backtrace::write(err);
+ } else if FIRST_PANIC.compare_and_swap(true, false, Ordering::SeqCst) {
+ let _ = writeln!(err, "note: Run with `RUST_BACKTRACE=1` for a backtrace.");
+ }
}
};
pub mod args;
pub mod at_exit_imp;
+#[cfg(any(not(cargobuild), feature = "backtrace"))]
pub mod backtrace;
pub mod condvar;
pub mod io;
pub mod util;
pub mod wtf8;
+#[cfg(any(not(cargobuild), feature = "backtrace"))]
#[cfg(any(all(unix, not(any(target_os = "macos", target_os = "ios", target_os = "emscripten"))),
all(windows, target_env = "gnu")))]
pub mod gnu;
pub trait PermissionsExt {
/// Returns the underlying raw `mode_t` bits that are the standard Unix
/// permissions for this file.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore
+ /// use std::fs::File;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// let f = try!(File::create("foo.txt"));
+ /// let metadata = try!(f.metadata());
+ /// let permissions = metadata.permissions();
+ ///
+ /// println!("permissions: {}", permissions.mode());
+ /// ```
#[stable(feature = "fs_ext", since = "1.1.0")]
fn mode(&self) -> u32;
/// Sets the underlying raw bits for this set of permissions.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore
+ /// use std::fs::File;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// let f = try!(File::create("foo.txt"));
+ /// let metadata = try!(f.metadata());
+ /// let mut permissions = metadata.permissions();
+ ///
+ /// permissions.set_mode(0o644); // Read/write for owner and read for others.
+ /// assert_eq!(permissions.mode(), 0o644);
+ /// ```
#[stable(feature = "fs_ext", since = "1.1.0")]
fn set_mode(&mut self, mode: u32);
/// Creates a new instance of `Permissions` from the given set of Unix
/// permission bits.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore
+ /// use std::fs::Permissions;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// // Read/write for owner and read for others.
+ /// let permissions = Permissions::from_mode(0o644);
+ /// assert_eq!(permissions.mode(), 0o644);
+ /// ```
#[stable(feature = "fs_ext", since = "1.1.0")]
fn from_mode(mode: u32) -> Self;
}
/// If no `mode` is set, the default of `0o666` will be used.
/// The operating system masks out bits with the systems `umask`, to produce
/// the final permissions.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore
+ /// extern crate libc;
+ /// use std::fs::OpenOptions;
+ /// use std::os::unix::fs::OpenOptionsExt;
+ ///
+ /// let mut options = OpenOptions::new();
+ /// options.mode(0o644); // Give read/write for owner and read for others.
+ /// let file = options.open("foo.txt");
+ /// ```
#[stable(feature = "fs_ext", since = "1.1.0")]
fn mode(&mut self, mode: u32) -> &mut Self;
pub mod weak;
pub mod android;
+#[cfg(any(not(cargobuild), feature = "backtrace"))]
pub mod backtrace;
pub mod condvar;
pub mod ext;
use ast;
use ast::{AttrId, Attribute, Attribute_, MetaItem, MetaItemKind};
use ast::{Expr, Item, Local, Stmt, StmtKind};
-use codemap::{spanned, dummy_spanned, Spanned};
-use syntax_pos::{Span, BytePos};
+use codemap::{respan, spanned, dummy_spanned, Spanned};
+use syntax_pos::{Span, BytePos, DUMMY_SP};
use errors::Handler;
use feature_gate::{Features, GatedCfg};
use parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
/// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list(&self) -> Option<&[P<MetaItem>]>;
+ /// Indicates if the attribute is a Word.
+ fn is_word(&self) -> bool;
+
+ /// Indicates if the attribute is a Value String.
+ fn is_value_str(&self) -> bool {
+ self.value_str().is_some()
+ }
+
+ /// Indicates if the attribute is a Meta-Item List.
+ fn is_meta_item_list(&self) -> bool {
+ self.meta_item_list().is_some()
+ }
+
fn span(&self) -> Span;
}
self.meta().value_str()
}
fn meta_item_list(&self) -> Option<&[P<MetaItem>]> {
- self.node.value.meta_item_list()
+ self.meta().meta_item_list()
}
+
+ fn is_word(&self) -> bool { self.meta().is_word() }
+
fn span(&self) -> Span { self.meta().span }
}
_ => None
}
}
+
+ fn is_word(&self) -> bool {
+ match self.node {
+ MetaItemKind::Word(_) => true,
+ _ => false,
+ }
+ }
+
fn span(&self) -> Span { self.span }
}
fn meta_item_list(&self) -> Option<&[P<MetaItem>]> {
(**self).meta_item_list()
}
+ fn is_word(&self) -> bool { (**self).is_word() }
+ fn is_value_str(&self) -> bool { (**self).is_value_str() }
+ fn is_meta_item_list(&self) -> bool { (**self).is_meta_item_list() }
fn span(&self) -> Span { (**self).span() }
}
pub fn mk_name_value_item_str(name: InternedString, value: InternedString)
-> P<MetaItem> {
let value_lit = dummy_spanned(ast::LitKind::Str(value, ast::StrStyle::Cooked));
- mk_name_value_item(name, value_lit)
+ mk_spanned_name_value_item(DUMMY_SP, name, value_lit)
}
pub fn mk_name_value_item(name: InternedString, value: ast::Lit)
-> P<MetaItem> {
- P(dummy_spanned(MetaItemKind::NameValue(name, value)))
+ mk_spanned_name_value_item(DUMMY_SP, name, value)
}
pub fn mk_list_item(name: InternedString, items: Vec<P<MetaItem>>) -> P<MetaItem> {
- P(dummy_spanned(MetaItemKind::List(name, items)))
+ mk_spanned_list_item(DUMMY_SP, name, items)
}
pub fn mk_word_item(name: InternedString) -> P<MetaItem> {
- P(dummy_spanned(MetaItemKind::Word(name)))
+ mk_spanned_word_item(DUMMY_SP, name)
+}
+
+/// Creates a name/value `MetaItem` (e.g. `name = "value"`) carrying the given span.
+pub fn mk_spanned_name_value_item(sp: Span, name: InternedString, value: ast::Lit)
+ -> P<MetaItem> {
+ P(respan(sp, MetaItemKind::NameValue(name, value)))
+}
+
+/// Creates a list `MetaItem` (e.g. `name(item1, item2)`) carrying the given span.
+pub fn mk_spanned_list_item(sp: Span, name: InternedString, items: Vec<P<MetaItem>>)
+ -> P<MetaItem> {
+ P(respan(sp, MetaItemKind::List(name, items)))
+}
+
+/// Creates a bare word `MetaItem` (e.g. `name`) carrying the given span.
+pub fn mk_spanned_word_item(sp: Span, name: InternedString) -> P<MetaItem> {
+ P(respan(sp, MetaItemKind::Word(name)))
}
+
+
thread_local! { static NEXT_ATTR_ID: Cell<usize> = Cell::new(0) }
pub fn mk_attr_id() -> AttrId {
/// Returns an inner attribute with the given value.
pub fn mk_attr_inner(id: AttrId, item: P<MetaItem>) -> Attribute {
- dummy_spanned(Attribute_ {
- id: id,
- style: ast::AttrStyle::Inner,
- value: item,
- is_sugared_doc: false,
- })
+ mk_spanned_attr_inner(DUMMY_SP, id, item)
+}
+
+/// Returns an inner attribute with the given value and span.
+pub fn mk_spanned_attr_inner(sp: Span, id: AttrId, item: P<MetaItem>) -> Attribute {
+ respan(sp,
+ Attribute_ {
+ id: id,
+ style: ast::AttrStyle::Inner,
+ value: item,
+ is_sugared_doc: false,
+ })
+}
+
/// Returns an outer attribute with the given value.
pub fn mk_attr_outer(id: AttrId, item: P<MetaItem>) -> Attribute {
+ mk_spanned_attr_outer(DUMMY_SP, id, item)
+}
+
+/// Returns an outer attribute with the given value and span.
+pub fn mk_spanned_attr_outer(sp: Span, id: AttrId, item: P<MetaItem>) -> Attribute {
+ respan(sp,
+ Attribute_ {
+ id: id,
+ style: ast::AttrStyle::Outer,
+ value: item,
+ is_sugared_doc: false,
+ })
+}
+
+pub fn mk_doc_attr_outer(id: AttrId, item: P<MetaItem>, is_sugared_doc: bool) -> Attribute {
dummy_spanned(Attribute_ {
id: id,
style: ast::AttrStyle::Outer,
value: item,
- is_sugared_doc: false,
+ is_sugared_doc: is_sugared_doc,
})
}
respan(DUMMY_SP, t)
}
+/// Build a span that covers the two provided spans.
+///
+/// `DUMMY_SP` inputs are treated as "no span": if either argument is dummy,
+/// the other is returned unchanged (or `DUMMY_SP` when both are). Otherwise
+/// the result stretches from the smaller `lo` to the larger `hi`. The
+/// expansion id is kept only when both spans agree on it; a mismatch is
+/// discarded by resetting to `NO_EXPANSION`.
+pub fn combine_spans(sp1: Span, sp2: Span) -> Span {
+ if sp1 == DUMMY_SP && sp2 == DUMMY_SP {
+ DUMMY_SP
+ } else if sp1 == DUMMY_SP {
+ sp2
+ } else if sp2 == DUMMY_SP {
+ sp1
+ } else {
+ Span {
+ lo: if sp1.lo < sp2.lo { sp1.lo } else { sp2.lo },
+ hi: if sp1.hi > sp2.hi { sp1.hi } else { sp2.hi },
+ expn_id: if sp1.expn_id == sp2.expn_id { sp1.expn_id } else { NO_EXPANSION },
+ }
+ }
+}
+
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
}
fn attribute(&self, sp: Span, mi: P<ast::MetaItem>) -> ast::Attribute {
- respan(sp, ast::Attribute_ {
- id: attr::mk_attr_id(),
- style: ast::AttrStyle::Outer,
- value: mi,
- is_sugared_doc: false,
- })
+ attr::mk_spanned_attr_outer(sp, attr::mk_attr_id(), mi)
}
fn meta_word(&self, sp: Span, w: InternedString) -> P<ast::MetaItem> {
- P(respan(sp, ast::MetaItemKind::Word(w)))
+ attr::mk_spanned_word_item(sp, w)
}
- fn meta_list(&self,
- sp: Span,
- name: InternedString,
- mis: Vec<P<ast::MetaItem>> )
+ fn meta_list(&self, sp: Span, name: InternedString, mis: Vec<P<ast::MetaItem>>)
-> P<ast::MetaItem> {
- P(respan(sp, ast::MetaItemKind::List(name, mis)))
+ attr::mk_spanned_list_item(sp, name, mis)
}
- fn meta_name_value(&self,
- sp: Span,
- name: InternedString,
- value: ast::LitKind)
+ fn meta_name_value(&self, sp: Span, name: InternedString, value: ast::LitKind)
-> P<ast::MetaItem> {
- P(respan(sp, ast::MetaItemKind::NameValue(name, respan(sp, value))))
+ attr::mk_spanned_name_value_item(sp, name, respan(sp, value))
}
fn item_use(&self, sp: Span,
};
if is_use {
- match attr.node.value.node {
- ast::MetaItemKind::Word(..) => (),
- _ => fld.cx.span_err(attr.span, "arguments to macro_use are not allowed here"),
+ if !attr.is_word() {
+ fld.cx.span_err(attr.span, "arguments to macro_use are not allowed here");
}
return true;
}
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
+ ("rustc_partition_reused", Whitelisted, Gated("rustc_attrs",
+ "this attribute \
+ is just used for rustc unit tests \
+ and will never be stable",
+ cfg_fn!(rustc_attrs))),
+ ("rustc_partition_translated", Whitelisted, Gated("rustc_attrs",
+ "this attribute \
+ is just used for rustc unit tests \
+ and will never be stable",
+ cfg_fn!(rustc_attrs))),
("rustc_symbol_name", Whitelisted, Gated("rustc_attrs",
"internal rustc attributes will never be stable",
cfg_fn!(rustc_attrs))),
}}
}
+impl<'a> PostExpansionVisitor<'a> {
+ /// Emits a feature-gate error at `span` for ABIs that are still unstable
+ /// (`rust-intrinsic`, `platform-intrinsic`, `vectorcall`, `rust-call`).
+ /// All other ABIs pass through without comment. Centralizes checks that
+ /// were previously duplicated at each use site.
+ fn check_abi(&self, abi: Abi, span: Span) {
+ match abi {
+ Abi::RustIntrinsic =>
+ gate_feature_post!(&self, intrinsics, span,
+ "intrinsics are subject to change"),
+ Abi::PlatformIntrinsic => {
+ gate_feature_post!(&self, platform_intrinsics, span,
+ "platform intrinsics are experimental and possibly buggy")
+ },
+ Abi::Vectorcall => {
+ gate_feature_post!(&self, abi_vectorcall, span,
+ "vectorcall is experimental and subject to change")
+ }
+ Abi::RustCall => {
+ gate_feature_post!(&self, unboxed_closures, span,
+ "rust-call ABI is subject to change");
+ }
+ _ => {}
+ }
+ }
+}
+
impl<'a> Visitor for PostExpansionVisitor<'a> {
fn visit_attribute(&mut self, attr: &ast::Attribute) {
if !self.context.cm.span_allows_unstable(attr.span) {
across platforms, it is recommended to \
use `#[link(name = \"foo\")]` instead")
}
- match foreign_module.abi {
- Abi::RustIntrinsic =>
- gate_feature_post!(&self, intrinsics, i.span,
- "intrinsics are subject to change"),
- Abi::PlatformIntrinsic => {
- gate_feature_post!(&self, platform_intrinsics, i.span,
- "platform intrinsics are experimental \
- and possibly buggy")
- },
- Abi::Vectorcall => {
- gate_feature_post!(&self, abi_vectorcall, i.span,
- "vectorcall is experimental and subject to change")
- }
- _ => ()
- }
+ self.check_abi(foreign_module.abi, i.span);
}
ast::ItemKind::Fn(..) => {
visit::walk_foreign_item(self, i)
}
+ fn visit_ty(&mut self, ty: &ast::Ty) {
+ match ty.node {
+ ast::TyKind::BareFn(ref bare_fn_ty) => {
+ self.check_abi(bare_fn_ty.abi, ty.span);
+ }
+ _ => {}
+ }
+ visit::walk_ty(self, ty)
+ }
+
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprKind::Box(_) => {
}
match fn_kind {
- FnKind::ItemFn(_, _, _, _, abi, _) if abi == Abi::RustIntrinsic => {
- gate_feature_post!(&self, intrinsics,
- span,
- "intrinsics are subject to change")
- }
FnKind::ItemFn(_, _, _, _, abi, _) |
- FnKind::Method(_, &ast::MethodSig { abi, .. }, _) => match abi {
- Abi::RustCall => {
- gate_feature_post!(&self, unboxed_closures, span,
- "rust-call ABI is subject to change");
- },
- Abi::Vectorcall => {
- gate_feature_post!(&self, abi_vectorcall, span,
- "vectorcall is experimental and subject to change");
- },
- _ => {}
- },
+ FnKind::Method(_, &ast::MethodSig { abi, .. }, _) => {
+ self.check_abi(abi, span);
+ }
_ => {}
}
visit::walk_fn(self, fn_kind, fn_decl, block, span);
ti.span,
"associated constants are experimental")
}
- ast::TraitItemKind::Method(ref sig, _) => {
+ ast::TraitItemKind::Method(ref sig, ref block) => {
+ if block.is_none() {
+ self.check_abi(sig.abi, ti.span);
+ }
if sig.constness == ast::Constness::Const {
gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable");
}
}
Some(list) => {
for mi in list {
- let name = match mi.node {
- ast::MetaItemKind::Word(ref word) => (*word).clone(),
- _ => {
- span_err!(span_handler, mi.span, E0556,
- "malformed feature, expected just one word");
- continue
- }
- };
+ let name = if mi.is_word() {
+ mi.name()
+ } else {
+ span_err!(span_handler, mi.span, E0556,
+ "malformed feature, expected just one word");
+ continue
+ };
if let Some(&(_, _, _, setter)) = ACTIVE_FEATURES.iter()
.find(|& &(n, _, _, _)| name == n) {
*(setter(&mut features)) = true;
cfg: ast::CrateConfig,
ts: tokenstream::TokenStream)
-> Parser<'a> {
- tts_to_parser(sess, ts.tts, cfg)
+ tts_to_parser(sess, ts.to_tts(), cfg)
}
//! or a SequenceRepetition specifier (for the purpose of sequence generation during macro
//! expansion).
//!
-//! A TokenStream also has a slice view, `TokenSlice`, that is analogous to `str` for
-//! `String`: it allows the programmer to divvy up, explore, and otherwise partition a
-//! TokenStream as borrowed subsequences.
+//! ## Ownership
+//! TokenStreams are persistent data structures constructed as ropes with reference-
+//! counted children. In general, this means that calling an operation on a TokenStream
+//! (such as `slice`) produces an entirely new TokenStream from the borrowed reference to
+//! the original. This essentially coerces TokenStreams into 'views' of their subparts,
+//! and a borrowed TokenStream is sufficient to build an owned TokenStream without taking
+//! ownership of the original.
use ast::{self, AttrStyle, LitKind};
use syntax_pos::{Span, DUMMY_SP, NO_EXPANSION};
-use codemap::Spanned;
+use codemap::{Spanned, combine_spans};
use ext::base;
use ext::tt::macro_parser;
use parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
use parse::lexer;
use parse;
-use parse::token::{self, Token, Lit, InternedString, Nonterminal};
-use parse::token::Lit as TokLit;
+use parse::token::{self, Token, Lit, Nonterminal};
use std::fmt;
-use std::mem;
-use std::ops::Index;
-use std::ops;
use std::iter::*;
-
+use std::ops::{self, Index};
use std::rc::Rc;
/// A delimited sequence of token trees
/// struct itself shouldn't be directly manipulated; the internal structure is not stable,
/// and may be changed at any time in the future. The operators will not, however (except
/// for signatures, later on).
-#[derive(Eq,Clone,Hash,RustcEncodable,RustcDecodable)]
+#[derive(Clone, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct TokenStream {
- pub span: Span,
- pub tts: Vec<TokenTree>,
+ ts: InternalTS,
+}
+
+// NB If Leaf access proves to be slow, introducing a secondary Leaf without the bounds
+// for unsliced Leafs may lead to some performance improvement.
+/// The rope structure backing a `TokenStream`: either an empty stream, a
+/// (possibly sliced) leaf over a shared vector of `TokenTree`s, or an
+/// interior node concatenating two subtrees.
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub enum InternalTS {
+ Empty(Span),
+ /// A window of `len` trees starting at `offset` into a reference-counted
+ /// `TokenTree` vector, so slices share storage with their parent.
+ Leaf {
+ tts: Rc<Vec<TokenTree>>,
+ offset: usize,
+ len: usize,
+ sp: Span,
+ },
+ /// Concatenation of `left` and `right`; `len` caches the combined length.
+ Node {
+ left: Rc<InternalTS>,
+ right: Rc<InternalTS>,
+ len: usize,
+ sp: Span,
+ },
}
impl fmt::Debug for TokenStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- if self.tts.len() == 0 {
- write!(f, "([empty")?;
- } else {
- write!(f, "([")?;
- write!(f, "{:?}", self.tts[0])?;
-
- for tt in self.tts.iter().skip(1) {
- write!(f, ",{:?}", tt)?;
+ self.ts.fmt(f)
+ }
+}
+
+impl fmt::Debug for InternalTS {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ InternalTS::Empty(..) => Ok(()),
+ InternalTS::Leaf { ref tts, offset, len, .. } => {
+ for t in tts.iter().skip(offset).take(len) {
+ try!(write!(f, "{:?}", t));
+ }
+ Ok(())
+ }
+ InternalTS::Node { ref left, ref right, .. } => {
+ try!(left.fmt(f));
+ right.fmt(f)
}
}
- write!(f, "|")?;
- self.span.fmt(f)?;
- write!(f, "])")
}
}
/// equality, see `eq_unspanned`.
impl PartialEq<TokenStream> for TokenStream {
fn eq(&self, other: &TokenStream) -> bool {
- self.tts == other.tts
+ self.iter().eq(other.iter())
}
}
}
}
+impl InternalTS {
+ /// Number of `TokenTree`s reachable from this node (cached in each variant).
+ fn len(&self) -> usize {
+ match *self {
+ InternalTS::Empty(..) => 0,
+ InternalTS::Leaf { len, .. } => len,
+ InternalTS::Node { len, .. } => len,
+ }
+ }
+
+ /// The span recorded for this node; every variant carries one.
+ fn span(&self) -> Span {
+ match *self {
+ InternalTS::Empty(sp) |
+ InternalTS::Leaf { sp, .. } |
+ InternalTS::Node { sp, .. } => sp,
+ }
+ }
+
+ /// Returns the subsequence `range` as a new `TokenStream`.
+ ///
+ /// Leaf storage is shared (only the `Rc` is cloned, not the trees), so this
+ /// is a cheap view rather than a deep copy. An empty range yields an empty
+ /// stream, and a full range clones this node wholesale. Panics when the
+ /// range is inverted or reaches past the end.
+ fn slice(&self, range: ops::Range<usize>) -> TokenStream {
+ let from = range.start;
+ let to = range.end;
+ if from == to {
+ return TokenStream::mk_empty();
+ }
+ if from > to {
+ panic!("Invalid range: {} to {}", from, to);
+ }
+ if from == 0 && to == self.len() {
+ return TokenStream { ts: self.clone() }; /* should be cheap */
+ }
+ match *self {
+ InternalTS::Empty(..) => panic!("Invalid index"),
+ InternalTS::Leaf { ref tts, offset, .. } => {
+ // Narrow the existing window; recompute the covering span for it.
+ let offset = offset + from;
+ let len = to - from;
+ TokenStream::mk_sub_leaf(tts.clone(),
+ offset,
+ len,
+ covering_span(&tts[offset..offset + len]))
+ }
+ InternalTS::Node { ref left, ref right, .. } => {
+ // Recurse into whichever side(s) the range touches.
+ let left_len = left.len();
+ if to <= left_len {
+ left.slice(range)
+ } else if from >= left_len {
+ right.slice(from - left_len..to - left_len)
+ } else {
+ TokenStream::concat(left.slice(from..left_len), right.slice(0..to - left_len))
+ }
+ }
+ }
+ }
+}
+
/// TokenStream operators include basic destructuring, boolean operations, `maybe_...`
/// operations, and `maybe_..._prefix` operations. Boolean operations are straightforward,
/// indicating information about the structure of the stream. The `maybe_...` operations
///
/// `maybe_path_prefix("a::b::c(a,b,c).foo()") -> (a::b::c, "(a,b,c).foo()")`
impl TokenStream {
- /// Convert a vector of `TokenTree`s into a `TokenStream`.
- pub fn from_tts(trees: Vec<TokenTree>) -> TokenStream {
- let span = covering_span(&trees);
- TokenStream {
- tts: trees,
- span: span,
- }
+ pub fn mk_empty() -> TokenStream {
+ TokenStream { ts: InternalTS::Empty(DUMMY_SP) }
}
- /// Copies all of the TokenTrees from the TokenSlice, appending them to the stream.
- pub fn append_stream(mut self, ts2: &TokenSlice) {
- for tt in ts2.iter() {
- self.tts.push(tt.clone());
- }
- self.span = covering_span(&self.tts[..]);
+ fn mk_spanned_empty(sp: Span) -> TokenStream {
+ TokenStream { ts: InternalTS::Empty(sp) }
}
- /// Manually change a TokenStream's span.
- pub fn respan(self, span: Span) -> TokenStream {
+ fn mk_leaf(tts: Rc<Vec<TokenTree>>, sp: Span) -> TokenStream {
+ let len = tts.len();
TokenStream {
- tts: self.tts,
- span: span,
+ ts: InternalTS::Leaf {
+ tts: tts,
+ offset: 0,
+ len: len,
+ sp: sp,
+ },
}
}
- /// Construct a TokenStream from an ast literal.
- pub fn from_ast_lit_str(lit: ast::Lit) -> Option<TokenStream> {
- match lit.node {
- LitKind::Str(val, _) => {
- let val = TokLit::Str_(token::intern(&val));
- Some(TokenStream::from_tts(vec![TokenTree::Token(lit.span,
- Token::Literal(val, None))]))
- }
- _ => None,
+ fn mk_sub_leaf(tts: Rc<Vec<TokenTree>>, offset: usize, len: usize, sp: Span) -> TokenStream {
+ TokenStream {
+ ts: InternalTS::Leaf {
+ tts: tts,
+ offset: offset,
+ len: len,
+ sp: sp,
+ },
}
-
}
- /// Convert a vector of TokenTrees into a parentheses-delimited TokenStream.
- pub fn as_paren_delimited_stream(tts: Vec<TokenTree>) -> TokenStream {
- let new_sp = covering_span(&tts);
-
- let new_delim = Rc::new(Delimited {
- delim: token::DelimToken::Paren,
- open_span: DUMMY_SP,
- tts: tts,
- close_span: DUMMY_SP,
- });
-
- TokenStream::from_tts(vec![TokenTree::Delimited(new_sp, new_delim)])
+ fn mk_int_node(left: Rc<InternalTS>,
+ right: Rc<InternalTS>,
+ len: usize,
+ sp: Span)
+ -> TokenStream {
+ TokenStream {
+ ts: InternalTS::Node {
+ left: left,
+ right: right,
+ len: len,
+ sp: sp,
+ },
+ }
}
- /// Convert an interned string into a one-element TokenStream.
- pub fn from_interned_string_as_ident(s: InternedString) -> TokenStream {
- TokenStream::from_tts(vec![TokenTree::Token(DUMMY_SP,
- Token::Ident(token::str_to_ident(&s[..])))])
+ /// Convert a vector of `TokenTree`s into a `TokenStream`.
+ pub fn from_tts(trees: Vec<TokenTree>) -> TokenStream {
+ let span = covering_span(&trees[..]);
+ TokenStream::mk_leaf(Rc::new(trees), span)
}
-}
-
-/// TokenSlices are 'views' of `TokenStream's; they fit the same role as `str`s do for
-/// `String`s. In general, most TokenStream manipulations will be refocusing their internal
-/// contents by taking a TokenSlice and then using indexing and the provided operators.
-#[derive(PartialEq, Eq, Debug)]
-pub struct TokenSlice([TokenTree]);
-
-impl ops::Deref for TokenStream {
- type Target = TokenSlice;
- fn deref(&self) -> &TokenSlice {
- let tts: &[TokenTree] = &*self.tts;
- unsafe { mem::transmute(tts) }
+ /// Manually change a TokenStream's span.
+ pub fn respan(self, span: Span) -> TokenStream {
+ match self.ts {
+ InternalTS::Empty(..) => TokenStream::mk_spanned_empty(span),
+ InternalTS::Leaf { tts, offset, len, .. } => {
+ TokenStream::mk_sub_leaf(tts, offset, len, span)
+ }
+ InternalTS::Node { left, right, len, .. } => {
+ TokenStream::mk_int_node(left, right, len, span)
+ }
+ }
}
-}
-impl TokenSlice {
- /// Convert a borrowed TokenTree slice into a borrowed TokenSlice.
- fn from_tts(tts: &[TokenTree]) -> &TokenSlice {
- unsafe { mem::transmute(tts) }
+ /// Concatenates two TokenStreams into a new TokenStream
+ pub fn concat(left: TokenStream, right: TokenStream) -> TokenStream {
+ let new_len = left.len() + right.len();
+ let new_span = combine_spans(left.span(), right.span());
+ TokenStream::mk_int_node(Rc::new(left.ts), Rc::new(right.ts), new_len, new_span)
}
- /// Indicates whether the `TokenStream` is empty.
+ /// Indicates whether the TokenStream is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
- /// Return the `TokenSlice`'s length.
+ /// Returns the TokenStream's length.
pub fn len(&self) -> usize {
- self.0.len()
+ self.ts.len()
}
- /// Check equality versus another TokenStream, ignoring span information.
- pub fn eq_unspanned(&self, other: &TokenSlice) -> bool {
- if self.len() != other.len() {
- return false;
- }
- for (tt1, tt2) in self.iter().zip(other.iter()) {
- if !tt1.eq_unspanned(tt2) {
- return false;
+ /// Convert a TokenStream into a vector of borrowed TokenTrees.
+ pub fn to_vec(&self) -> Vec<&TokenTree> {
+ fn internal_to_vec(ts: &InternalTS) -> Vec<&TokenTree> {
+ match *ts {
+ InternalTS::Empty(..) => Vec::new(),
+ InternalTS::Leaf { ref tts, offset, len, .. } => {
+ tts[offset..offset + len].iter().collect()
+ }
+ InternalTS::Node { ref left, ref right, .. } => {
+ let mut v1 = internal_to_vec(left);
+ let mut v2 = internal_to_vec(right);
+ v1.append(&mut v2);
+ v1
+ }
}
}
- true
+ internal_to_vec(&self.ts)
}
- /// Compute a span that covers the entire TokenSlice (eg, one wide enough to include
- /// the entire slice). If the inputs share expansion identification, it is preserved.
- /// If they do not, it is discarded.
- pub fn covering_span(&self) -> Span {
- covering_span(&self.0)
+ /// Convert a TokenStream into a vector of TokenTrees (by cloning the TokenTrees).
+ /// (This operation is an O(n) deep copy of the underlying structure.)
+ pub fn to_tts(&self) -> Vec<TokenTree> {
+ self.to_vec().into_iter().cloned().collect::<Vec<TokenTree>>()
}
- /// Indicates where the stream is of the form `= <ts>`, where `<ts>` is a continued
- /// `TokenStream`.
- pub fn is_assignment(&self) -> bool {
- self.maybe_assignment().is_some()
+ /// Return the TokenStream's span.
+ pub fn span(&self) -> Span {
+ self.ts.span()
}
- /// Returns the RHS of an assigment.
- pub fn maybe_assignment(&self) -> Option<&TokenSlice> {
- if !(self.len() > 1) {
- return None;
+ /// Returns an iterator over a TokenStream (as a sequence of TokenTrees).
+ pub fn iter<'a>(&self) -> Iter {
+ Iter { vs: self, idx: 0 }
+ }
+
+ /// Splits a TokenStream based on the provided `&TokenTree -> bool` predicate.
+ pub fn split<P>(&self, pred: P) -> Split<P>
+ where P: FnMut(&TokenTree) -> bool
+ {
+ Split {
+ vs: self,
+ pred: pred,
+ finished: false,
+ idx: 0,
}
+ }
+
+ /// Produce a slice of the input TokenStream from the `from` index, inclusive, to the
+ /// `to` index, non-inclusive.
+ pub fn slice(&self, range: ops::Range<usize>) -> TokenStream {
+ self.ts.slice(range)
+ }
+
+ /// Slice starting at the provided index, inclusive.
+ pub fn slice_from(&self, from: ops::RangeFrom<usize>) -> TokenStream {
+ self.slice(from.start..self.len())
+ }
- Some(&self[1..])
+ /// Slice up to the provided index, non-inclusive.
+ pub fn slice_to(&self, to: ops::RangeTo<usize>) -> TokenStream {
+ self.slice(0..to.end)
}
/// Indicates where the stream is a single, delimited expression (e.g., `(a,b,c)` or
}
/// Returns the inside of the delimited term as a new TokenStream.
- pub fn maybe_delimited(&self) -> Option<&TokenSlice> {
+ pub fn maybe_delimited(&self) -> Option<TokenStream> {
if !(self.len() == 1) {
return None;
}
+ // FIXME It would be nice to change Delimited to move the Rc around the TokenTree
+ // vector directly in order to avoid the clone here.
match self[0] {
- TokenTree::Delimited(_, ref rc) => Some(TokenSlice::from_tts(&*rc.tts)),
- _ => None,
- }
- }
-
- /// Returns a list of `TokenSlice`s if the stream is a delimited list, breaking the
- /// stream on commas.
- pub fn maybe_comma_list(&self) -> Option<Vec<&TokenSlice>> {
- let maybe_tts = self.maybe_delimited();
-
- let ts: &TokenSlice;
- match maybe_tts {
- Some(t) => {
- ts = t;
- }
- None => {
- return None;
- }
- }
-
- let splits: Vec<&TokenSlice> = ts.split(|x| match *x {
- TokenTree::Token(_, Token::Comma) => true,
- _ => false,
- })
- .filter(|x| x.len() > 0)
- .collect();
-
- Some(splits)
- }
-
- /// Returns a Nonterminal if it is Interpolated.
- pub fn maybe_interpolated_nonterminal(&self) -> Option<Nonterminal> {
- if !(self.len() == 1) {
- return None;
- }
-
- match self[0] {
- TokenTree::Token(_, Token::Interpolated(ref nt)) => Some(nt.clone()),
+ TokenTree::Delimited(_, ref rc) => Some(TokenStream::from_tts(rc.tts.clone())),
_ => None,
}
}
return None;
}
- let tok = if let Some(tts) = self.maybe_delimited() {
- if tts.len() != 1 {
- return None;
- }
- &tts[0]
- } else {
- &self[0]
- };
-
- match *tok {
+ match self[0] {
TokenTree::Token(_, Token::Ident(t)) => Some(t),
_ => None,
}
}
- /// Indicates if the stream is exactly one literal
- pub fn is_lit(&self) -> bool {
- self.maybe_lit().is_some()
- }
-
- /// Returns a literal
- pub fn maybe_lit(&self) -> Option<token::Lit> {
- if !(self.len() == 1) {
- return None;
- }
-
- let tok = if let Some(tts) = self.maybe_delimited() {
- if tts.len() != 1 {
- return None;
- }
- &tts[0]
- } else {
- &self[0]
- };
-
- match *tok {
- TokenTree::Token(_, Token::Literal(l, _)) => Some(l),
- _ => None,
- }
- }
-
- /// Returns an AST string literal if the TokenStream is either a normal ('cooked') or
- /// raw string literal.
- pub fn maybe_str(&self) -> Option<ast::Lit> {
- if !(self.len() == 1) {
- return None;
- }
-
- match self[0] {
- TokenTree::Token(sp, Token::Literal(Lit::Str_(s), _)) => {
- let l = LitKind::Str(token::intern_and_get_ident(&parse::str_lit(&s.as_str())),
- ast::StrStyle::Cooked);
- Some(Spanned {
- node: l,
- span: sp,
- })
- }
- TokenTree::Token(sp, Token::Literal(Lit::StrRaw(s, n), _)) => {
- let l = LitKind::Str(token::intern_and_get_ident(&parse::raw_str_lit(&s.as_str())),
- ast::StrStyle::Raw(n));
- Some(Spanned {
- node: l,
- span: sp,
- })
+ /// Compares two TokenStreams, checking equality without regarding span information.
+ pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
+ for (t1, t2) in self.iter().zip(other.iter()) {
+ if !t1.eq_unspanned(t2) {
+ return false;
}
- _ => None,
}
+ true
}
- /// This operation extracts the path prefix , returning an AST path struct and the remainder
- /// of the stream (if it finds one). To be more specific, a tokenstream that has a valid,
- /// non-global path as a prefix (eg `foo(bar, baz)`, `foo::bar(bar)`, but *not*
- /// `::foo::bar(baz)`) will yield the path and the remaining tokens (as a slice). The previous
- /// examples will yield
- /// `Some((Path { segments = vec![foo], ... }, [(bar, baz)]))`,
- /// `Some((Path { segments = vec![foo, bar] }, [(baz)]))`,
- /// and `None`, respectively.
- pub fn maybe_path_prefix(&self) -> Option<(ast::Path, &TokenSlice)> {
- let mut segments: Vec<ast::PathSegment> = Vec::new();
-
- let path: Vec<&TokenTree> = self.iter()
- .take_while(|x| x.is_ident() || x.eq_token(Token::ModSep))
- .collect::<Vec<&TokenTree>>();
-
- let path_size = path.len();
- if path_size == 0 {
- return None;
- }
-
- let cov_span = self[..path_size].covering_span();
- let rst = &self[path_size..];
-
- let fst_id = path[0];
-
- if let Some(id) = fst_id.maybe_ident() {
- segments.push(ast::PathSegment {
- identifier: id,
- parameters: ast::PathParameters::none(),
- });
- } else {
- return None;
- }
-
- // Let's use a state machine to parse out the rest.
- enum State {
- Mod, // Expect a `::`, or return None otherwise.
- Ident, // Expect an ident, or return None otherwise.
- }
- let mut state = State::Mod;
-
- for p in &path[1..] {
- match state {
- State::Mod => {
- // State 0: ['::' -> state 1, else return None]
- if p.eq_token(Token::ModSep) {
- state = State::Ident;
- } else {
- return None;
- }
- }
- State::Ident => {
- // State 1: [ident -> state 0, else return None]
- if let Some(id) = p.maybe_ident() {
- segments.push(ast::PathSegment {
- identifier: id,
- parameters: ast::PathParameters::none(),
- });
- state = State::Mod;
- } else {
- return None;
- }
- }
- }
- }
-
- let path = ast::Path {
- span: cov_span,
- global: false,
- segments: segments,
- };
- Some((path, rst))
- }
+ /// Convert a vector of TokenTrees into a parentheses-delimited TokenStream.
+ pub fn as_delimited_stream(tts: Vec<TokenTree>, delim: token::DelimToken) -> TokenStream {
+ let new_sp = covering_span(&tts);
- /// Returns an iterator over a TokenSlice (as a sequence of TokenStreams).
- fn iter(&self) -> Iter {
- Iter { vs: self }
- }
+ let new_delim = Rc::new(Delimited {
+ delim: delim,
+ open_span: DUMMY_SP,
+ tts: tts,
+ close_span: DUMMY_SP,
+ });
- /// Splits a TokenSlice based on the provided `&TokenTree -> bool` predicate.
- fn split<P>(&self, pred: P) -> Split<P>
- where P: FnMut(&TokenTree) -> bool
- {
- Split {
- vs: self,
- pred: pred,
- finished: false,
- }
+ TokenStream::from_tts(vec![TokenTree::Delimited(new_sp, new_delim)])
}
}
+// FIXME Reimplement this iterator to hold onto a slice iterator for a leaf, getting the
+// next leaf's iterator when the current one is exhausted.
pub struct Iter<'a> {
- vs: &'a TokenSlice,
+ vs: &'a TokenStream,
+ idx: usize,
}
impl<'a> Iterator for Iter<'a> {
type Item = &'a TokenTree;
fn next(&mut self) -> Option<&'a TokenTree> {
- if self.vs.is_empty() {
+ if self.vs.is_empty() || self.idx >= self.vs.len() {
return None;
}
- let ret = Some(&self.vs[0]);
- self.vs = &self.vs[1..];
+ let ret = Some(&self.vs[self.idx]);
+ self.idx = self.idx + 1;
ret
}
}
pub struct Split<'a, P>
where P: FnMut(&TokenTree) -> bool
{
- vs: &'a TokenSlice,
+ vs: &'a TokenStream,
pred: P,
finished: bool,
+ idx: usize,
}
impl<'a, P> Iterator for Split<'a, P>
where P: FnMut(&TokenTree) -> bool
{
- type Item = &'a TokenSlice;
+ type Item = TokenStream;
- fn next(&mut self) -> Option<&'a TokenSlice> {
+ fn next(&mut self) -> Option<TokenStream> {
if self.finished {
return None;
}
+ if self.idx >= self.vs.len() {
+ self.finished = true;
+ return None;
+ }
- match self.vs.iter().position(|x| (self.pred)(x)) {
+ let mut lookup = self.vs.iter().skip(self.idx);
+ match lookup.position(|x| (self.pred)(&x)) {
None => {
self.finished = true;
- Some(&self.vs[..])
+ Some(self.vs.slice_from(self.idx..))
}
- Some(idx) => {
- let ret = Some(&self.vs[..idx]);
- self.vs = &self.vs[idx + 1..];
+ Some(edx) => {
+ let ret = Some(self.vs.slice(self.idx..self.idx + edx));
+ self.idx += edx + 1;
ret
}
}
type Output = TokenTree;
fn index(&self, index: usize) -> &TokenTree {
- Index::index(&**self, index)
+ &self.ts[index]
}
}
-impl ops::Index<ops::Range<usize>> for TokenStream {
- type Output = TokenSlice;
+impl Index<usize> for InternalTS {
+ type Output = TokenTree;
- fn index(&self, index: ops::Range<usize>) -> &TokenSlice {
- Index::index(&**self, index)
+ fn index(&self, index: usize) -> &TokenTree {
+ if self.len() <= index {
+ panic!("Index {} too large for {:?}", index, self);
+ }
+ match *self {
+ InternalTS::Empty(..) => panic!("Invalid index"),
+ InternalTS::Leaf { ref tts, offset, .. } => tts.get(index + offset).unwrap(),
+ InternalTS::Node { ref left, ref right, .. } => {
+ let left_len = left.len();
+ if index < left_len {
+ Index::index(&**left, index)
+ } else {
+ Index::index(&**right, index - left_len)
+ }
+ }
+ }
}
}
-impl ops::Index<ops::RangeTo<usize>> for TokenStream {
- type Output = TokenSlice;
-
- fn index(&self, index: ops::RangeTo<usize>) -> &TokenSlice {
- Index::index(&**self, index)
- }
-}
-impl ops::Index<ops::RangeFrom<usize>> for TokenStream {
- type Output = TokenSlice;
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use syntax_pos::{Span, BytePos, NO_EXPANSION, DUMMY_SP};
+ use parse::token::{self, str_to_ident, Token};
+ use util::parser_testing::string_to_tts;
+ use std::rc::Rc;
- fn index(&self, index: ops::RangeFrom<usize>) -> &TokenSlice {
- Index::index(&**self, index)
+ fn sp(a: u32, b: u32) -> Span {
+ Span {
+ lo: BytePos(a),
+ hi: BytePos(b),
+ expn_id: NO_EXPANSION,
+ }
}
-}
-
-impl ops::Index<ops::RangeFull> for TokenStream {
- type Output = TokenSlice;
- fn index(&self, _index: ops::RangeFull) -> &TokenSlice {
- Index::index(&**self, _index)
+ fn as_paren_delimited_stream(tts: Vec<TokenTree>) -> TokenStream {
+ TokenStream::as_delimited_stream(tts, token::DelimToken::Paren)
}
-}
-
-impl Index<usize> for TokenSlice {
- type Output = TokenTree;
- fn index(&self, index: usize) -> &TokenTree {
- &self.0[index]
+ #[test]
+ fn test_concat() {
+ let test_res = TokenStream::from_tts(string_to_tts("foo::bar::baz".to_string()));
+ let test_fst = TokenStream::from_tts(string_to_tts("foo::bar".to_string()));
+ let test_snd = TokenStream::from_tts(string_to_tts("::baz".to_string()));
+ let eq_res = TokenStream::concat(test_fst, test_snd);
+ assert_eq!(test_res.len(), 5);
+ assert_eq!(eq_res.len(), 5);
+ assert_eq!(test_res.eq_unspanned(&eq_res), true);
}
-}
-
-impl ops::Index<ops::Range<usize>> for TokenSlice {
- type Output = TokenSlice;
- fn index(&self, index: ops::Range<usize>) -> &TokenSlice {
- TokenSlice::from_tts(&self.0[index])
+ #[test]
+ fn test_from_to_bijection() {
+ let test_start = string_to_tts("foo::bar(baz)".to_string());
+ let test_end = TokenStream::from_tts(string_to_tts("foo::bar(baz)".to_string())).to_tts();
+ assert_eq!(test_start, test_end)
}
-}
-impl ops::Index<ops::RangeTo<usize>> for TokenSlice {
- type Output = TokenSlice;
+ #[test]
+ fn test_to_from_bijection() {
+ let test_start = TokenStream::from_tts(string_to_tts("foo::bar(baz)".to_string()));
+ let test_end = TokenStream::from_tts(test_start.clone().to_tts());
+ assert_eq!(test_start, test_end)
+ }
- fn index(&self, index: ops::RangeTo<usize>) -> &TokenSlice {
- TokenSlice::from_tts(&self.0[index])
+ #[test]
+ fn test_eq_0() {
+ let test_res = TokenStream::from_tts(string_to_tts("foo".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("foo".to_string()));
+ assert_eq!(test_res, test_eqs)
}
-}
-impl ops::Index<ops::RangeFrom<usize>> for TokenSlice {
- type Output = TokenSlice;
+ #[test]
+ fn test_eq_1() {
+ let test_res = TokenStream::from_tts(string_to_tts("::bar::baz".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("::bar::baz".to_string()));
+ assert_eq!(test_res, test_eqs)
+ }
- fn index(&self, index: ops::RangeFrom<usize>) -> &TokenSlice {
- TokenSlice::from_tts(&self.0[index])
+ #[test]
+ fn test_eq_2() {
+ let test_res = TokenStream::from_tts(string_to_tts("foo::bar".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("foo::bar::baz".to_string()));
+ assert_eq!(test_res, test_eqs.slice(0..3))
}
-}
-impl ops::Index<ops::RangeFull> for TokenSlice {
- type Output = TokenSlice;
+ #[test]
+ fn test_eq_3() {
+ let test_res = TokenStream::from_tts(string_to_tts("".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("".to_string()));
+ assert_eq!(test_res, test_eqs)
+ }
- fn index(&self, _index: ops::RangeFull) -> &TokenSlice {
- TokenSlice::from_tts(&self.0[_index])
+ #[test]
+ fn test_diseq_0() {
+ let test_res = TokenStream::from_tts(string_to_tts("::bar::baz".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("bar::baz".to_string()));
+ assert_eq!(test_res == test_eqs, false)
}
-}
+ #[test]
+ fn test_diseq_1() {
+ let test_res = TokenStream::from_tts(string_to_tts("(bar,baz)".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("bar,baz".to_string()));
+ assert_eq!(test_res == test_eqs, false)
+ }
-#[cfg(test)]
-mod tests {
- use super::*;
- use ast;
- use syntax_pos::{Span, BytePos, NO_EXPANSION, DUMMY_SP};
- use parse::token::{self, str_to_ident, Token, Lit};
- use util::parser_testing::string_to_tts;
- use std::rc::Rc;
+ #[test]
+ fn test_slice_0() {
+ let test_res = TokenStream::from_tts(string_to_tts("foo::bar".to_string()));
+ let test_eqs = TokenStream::from_tts(string_to_tts("foo::bar::baz".to_string()));
+ assert_eq!(test_res, test_eqs.slice(0..3))
+ }
- fn sp(a: u32, b: u32) -> Span {
- Span {
- lo: BytePos(a),
- hi: BytePos(b),
- expn_id: NO_EXPANSION,
- }
+ #[test]
+ fn test_slice_1() {
+ let test_res = TokenStream::from_tts(string_to_tts("foo::bar::baz".to_string()))
+ .slice(2..3);
+ let test_eqs = TokenStream::from_tts(vec![TokenTree::Token(sp(5,8),
+ token::Ident(str_to_ident("bar")))]);
+ assert_eq!(test_res, test_eqs)
}
#[test]
assert_eq!(test5.is_delimited(), false);
}
- #[test]
- fn test_is_assign() {
- let test0 = TokenStream::from_tts(string_to_tts("= bar::baz".to_string()));
- let test1 = TokenStream::from_tts(string_to_tts("= \"5\"".to_string()));
- let test2 = TokenStream::from_tts(string_to_tts("= 5".to_string()));
- let test3 = TokenStream::from_tts(string_to_tts("(foo = 10)".to_string()));
- let test4 = TokenStream::from_tts(string_to_tts("= (foo,bar,baz)".to_string()));
- let test5 = TokenStream::from_tts(string_to_tts("".to_string()));
-
- assert_eq!(test0.is_assignment(), true);
- assert_eq!(test1.is_assignment(), true);
- assert_eq!(test2.is_assignment(), true);
- assert_eq!(test3.is_assignment(), false);
- assert_eq!(test4.is_assignment(), true);
- assert_eq!(test5.is_assignment(), false);
- }
-
- #[test]
- fn test_is_lit() {
- let test0 = TokenStream::from_tts(string_to_tts("\"foo\"".to_string()));
- let test1 = TokenStream::from_tts(string_to_tts("5".to_string()));
- let test2 = TokenStream::from_tts(string_to_tts("foo".to_string()));
- let test3 = TokenStream::from_tts(string_to_tts("foo::bar".to_string()));
- let test4 = TokenStream::from_tts(string_to_tts("foo(bar)".to_string()));
-
- assert_eq!(test0.is_lit(), true);
- assert_eq!(test1.is_lit(), true);
- assert_eq!(test2.is_lit(), false);
- assert_eq!(test3.is_lit(), false);
- assert_eq!(test4.is_lit(), false);
- }
-
#[test]
fn test_is_ident() {
let test0 = TokenStream::from_tts(string_to_tts("\"foo\"".to_string()));
assert_eq!(test4.is_ident(), false);
}
- #[test]
- fn test_maybe_assignment() {
- let test0_input = TokenStream::from_tts(string_to_tts("= bar::baz".to_string()));
- let test1_input = TokenStream::from_tts(string_to_tts("= \"5\"".to_string()));
- let test2_input = TokenStream::from_tts(string_to_tts("= 5".to_string()));
- let test3_input = TokenStream::from_tts(string_to_tts("(foo = 10)".to_string()));
- let test4_input = TokenStream::from_tts(string_to_tts("= (foo,bar,baz)".to_string()));
- let test5_input = TokenStream::from_tts(string_to_tts("".to_string()));
-
- let test0 = test0_input.maybe_assignment();
- let test1 = test1_input.maybe_assignment();
- let test2 = test2_input.maybe_assignment();
- let test3 = test3_input.maybe_assignment();
- let test4 = test4_input.maybe_assignment();
- let test5 = test5_input.maybe_assignment();
-
- let test0_expected = TokenStream::from_tts(vec![TokenTree::Token(sp(2, 5),
- token::Ident(str_to_ident("bar"))),
- TokenTree::Token(sp(5, 7), token::ModSep),
- TokenTree::Token(sp(7, 10),
- token::Ident(str_to_ident("baz")))]);
- assert_eq!(test0, Some(&test0_expected[..]));
-
- let test1_expected = TokenStream::from_tts(vec![TokenTree::Token(sp(2, 5),
- token::Literal(Lit::Str_(token::intern("5")), None))]);
- assert_eq!(test1, Some(&test1_expected[..]));
-
- let test2_expected = TokenStream::from_tts(vec![TokenTree::Token( sp(2,3)
- , token::Literal(
- Lit::Integer(
- token::intern(&(5.to_string()))),
- None))]);
- assert_eq!(test2, Some(&test2_expected[..]));
-
- assert_eq!(test3, None);
-
-
- let test4_tts = vec![TokenTree::Token(sp(3, 6), token::Ident(str_to_ident("foo"))),
- TokenTree::Token(sp(6, 7), token::Comma),
- TokenTree::Token(sp(7, 10), token::Ident(str_to_ident("bar"))),
- TokenTree::Token(sp(10, 11), token::Comma),
- TokenTree::Token(sp(11, 14), token::Ident(str_to_ident("baz")))];
-
- let test4_expected = TokenStream::from_tts(vec![TokenTree::Delimited(sp(2, 15),
- Rc::new(Delimited {
- delim: token::DelimToken::Paren,
- open_span: sp(2, 3),
- tts: test4_tts,
- close_span: sp(14, 15),
- }))]);
- assert_eq!(test4, Some(&test4_expected[..]));
-
- assert_eq!(test5, None);
-
- }
-
#[test]
fn test_maybe_delimited() {
let test0_input = TokenStream::from_tts(string_to_tts("foo(bar::baz)".to_string()));
TokenTree::Token(sp(4, 6), token::ModSep),
TokenTree::Token(sp(6, 9),
token::Ident(str_to_ident("baz")))]);
- assert_eq!(test1, Some(&test1_expected[..]));
+ assert_eq!(test1, Some(test1_expected));
let test2_expected = TokenStream::from_tts(vec![TokenTree::Token(sp(1, 4),
token::Ident(str_to_ident("foo"))),
TokenTree::Token(sp(8, 9), token::Comma),
TokenTree::Token(sp(9, 12),
token::Ident(str_to_ident("baz")))]);
- assert_eq!(test2, Some(&test2_expected[..]));
-
- assert_eq!(test3, None);
-
- assert_eq!(test4, None);
-
- assert_eq!(test5, None);
- }
-
- #[test]
- fn test_maybe_comma_list() {
- let test0_input = TokenStream::from_tts(string_to_tts("foo(bar::baz)".to_string()));
- let test1_input = TokenStream::from_tts(string_to_tts("(bar::baz)".to_string()));
- let test2_input = TokenStream::from_tts(string_to_tts("(foo,bar,baz)".to_string()));
- let test3_input = TokenStream::from_tts(string_to_tts("(foo::bar,bar,baz)".to_string()));
- let test4_input = TokenStream::from_tts(string_to_tts("(foo,bar,baz)(zab,rab)"
- .to_string()));
- let test5_input = TokenStream::from_tts(string_to_tts("(foo,bar,baz)foo".to_string()));
- let test6_input = TokenStream::from_tts(string_to_tts("".to_string()));
- // The following is supported behavior!
- let test7_input = TokenStream::from_tts(string_to_tts("(foo,bar,)".to_string()));
-
- let test0 = test0_input.maybe_comma_list();
- let test1 = test1_input.maybe_comma_list();
- let test2 = test2_input.maybe_comma_list();
- let test3 = test3_input.maybe_comma_list();
- let test4 = test4_input.maybe_comma_list();
- let test5 = test5_input.maybe_comma_list();
- let test6 = test6_input.maybe_comma_list();
- let test7 = test7_input.maybe_comma_list();
-
- assert_eq!(test0, None);
-
- let test1_stream = TokenStream::from_tts(vec![TokenTree::Token(sp(1, 4),
- token::Ident(str_to_ident("bar"))),
- TokenTree::Token(sp(4, 6), token::ModSep),
- TokenTree::Token(sp(6, 9),
- token::Ident(str_to_ident("baz")))]);
-
- let test1_expected: Vec<&TokenSlice> = vec![&test1_stream[..]];
- assert_eq!(test1, Some(test1_expected));
-
- let test2_foo = TokenStream::from_tts(vec![TokenTree::Token(sp(1, 4),
- token::Ident(str_to_ident("foo")))]);
- let test2_bar = TokenStream::from_tts(vec![TokenTree::Token(sp(5, 8),
- token::Ident(str_to_ident("bar")))]);
- let test2_baz = TokenStream::from_tts(vec![TokenTree::Token(sp(9, 12),
- token::Ident(str_to_ident("baz")))]);
- let test2_expected: Vec<&TokenSlice> = vec![&test2_foo[..], &test2_bar[..], &test2_baz[..]];
assert_eq!(test2, Some(test2_expected));
- let test3_path = TokenStream::from_tts(vec![TokenTree::Token(sp(1, 4),
- token::Ident(str_to_ident("foo"))),
- TokenTree::Token(sp(4, 6), token::ModSep),
- TokenTree::Token(sp(6, 9),
- token::Ident(str_to_ident("bar")))]);
- let test3_bar = TokenStream::from_tts(vec![TokenTree::Token(sp(10, 13),
- token::Ident(str_to_ident("bar")))]);
- let test3_baz = TokenStream::from_tts(vec![TokenTree::Token(sp(14, 17),
- token::Ident(str_to_ident("baz")))]);
- let test3_expected: Vec<&TokenSlice> =
- vec![&test3_path[..], &test3_bar[..], &test3_baz[..]];
- assert_eq!(test3, Some(test3_expected));
+ assert_eq!(test3, None);
assert_eq!(test4, None);
assert_eq!(test5, None);
-
- assert_eq!(test6, None);
-
-
- let test7_expected: Vec<&TokenSlice> = vec![&test2_foo[..], &test2_bar[..]];
- assert_eq!(test7, Some(test7_expected));
}
// pub fn maybe_ident(&self) -> Option<ast::Ident>
assert_eq!(test4, None);
}
- // pub fn maybe_lit(&self) -> Option<token::Lit>
#[test]
- fn test_maybe_lit() {
- let test0 = TokenStream::from_tts(string_to_tts("\"foo\"".to_string())).maybe_lit();
- let test1 = TokenStream::from_tts(string_to_tts("5".to_string())).maybe_lit();
- let test2 = TokenStream::from_tts(string_to_tts("foo".to_string())).maybe_lit();
- let test3 = TokenStream::from_tts(string_to_tts("foo::bar".to_string())).maybe_lit();
- let test4 = TokenStream::from_tts(string_to_tts("foo(bar)".to_string())).maybe_lit();
-
- assert_eq!(test0, Some(Lit::Str_(token::intern("foo"))));
- assert_eq!(test1, Some(Lit::Integer(token::intern(&(5.to_string())))));
- assert_eq!(test2, None);
- assert_eq!(test3, None);
- assert_eq!(test4, None);
- }
-
- #[test]
- fn test_maybe_path_prefix() {
- let test0_input = TokenStream::from_tts(string_to_tts("foo(bar::baz)".to_string()));
- let test1_input = TokenStream::from_tts(string_to_tts("(bar::baz)".to_string()));
- let test2_input = TokenStream::from_tts(string_to_tts("(foo,bar,baz)".to_string()));
- let test3_input = TokenStream::from_tts(string_to_tts("foo::bar(bar,baz)".to_string()));
-
- let test0 = test0_input.maybe_path_prefix();
- let test1 = test1_input.maybe_path_prefix();
- let test2 = test2_input.maybe_path_prefix();
- let test3 = test3_input.maybe_path_prefix();
-
- let test0_tts = vec![TokenTree::Token(sp(4, 7), token::Ident(str_to_ident("bar"))),
- TokenTree::Token(sp(7, 9), token::ModSep),
- TokenTree::Token(sp(9, 12), token::Ident(str_to_ident("baz")))];
-
- let test0_stream = TokenStream::from_tts(vec![TokenTree::Delimited(sp(3, 13),
- Rc::new(Delimited {
- delim: token::DelimToken::Paren,
- open_span: sp(3, 4),
- tts: test0_tts,
- close_span: sp(12, 13),
- }))]);
-
- let test0_expected = Some((ast::Path::from_ident(sp(0, 3), str_to_ident("foo")),
- &test0_stream[..]));
- assert_eq!(test0, test0_expected);
-
- assert_eq!(test1, None);
- assert_eq!(test2, None);
-
- let test3_path = ast::Path {
- span: sp(0, 8),
- global: false,
- segments: vec![ast::PathSegment {
- identifier: str_to_ident("foo"),
- parameters: ast::PathParameters::none(),
- },
- ast::PathSegment {
- identifier: str_to_ident("bar"),
- parameters: ast::PathParameters::none(),
- }],
- };
-
- let test3_tts = vec![TokenTree::Token(sp(9, 12), token::Ident(str_to_ident("bar"))),
- TokenTree::Token(sp(12, 13), token::Comma),
- TokenTree::Token(sp(13, 16), token::Ident(str_to_ident("baz")))];
-
- let test3_stream = TokenStream::from_tts(vec![TokenTree::Delimited(sp(8, 17),
- Rc::new(Delimited {
- delim: token::DelimToken::Paren,
- open_span: sp(8, 9),
- tts: test3_tts,
- close_span: sp(16, 17),
- }))]);
- let test3_expected = Some((test3_path, &test3_stream[..]));
- assert_eq!(test3, test3_expected);
- }
-
- #[test]
- fn test_as_paren_delimited_stream() {
- let test0 = TokenStream::as_paren_delimited_stream(string_to_tts("foo,bar,".to_string()));
- let test1 = TokenStream::as_paren_delimited_stream(string_to_tts("baz(foo,bar)"
- .to_string()));
+ fn test_as_delimited_stream() {
+ let test0 = as_paren_delimited_stream(string_to_tts("foo,bar,".to_string()));
+ let test1 = as_paren_delimited_stream(string_to_tts("baz(foo,bar)".to_string()));
let test0_tts = vec![TokenTree::Token(sp(0, 3), token::Ident(str_to_ident("foo"))),
TokenTree::Token(sp(3, 4), token::Comma),
assert_eq!(test1, test1_stream);
}
-
}
//! The compiler code necessary to implement the `#[derive]` extensions.
-use syntax::ast::{self, MetaItem, MetaItemKind};
+use syntax::ast::{MetaItem, self};
use syntax::attr::AttrMetaMethods;
use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxEnv};
use syntax::ext::base::{MultiDecorator, MultiItemDecorator, MultiModifier};
let mut eq_span = None;
for titem in traits.iter().rev() {
- let tname = match titem.node {
- MetaItemKind::Word(ref tname) => tname,
- _ => {
- cx.span_err(titem.span, "malformed `derive` entry");
- continue;
- }
- };
-
- if !(is_builtin_trait(tname) || cx.ecfg.enable_custom_derive()) {
+ let tname = if titem.is_word() {
+ titem.name() }
+ else {
+ cx.span_err(titem.span, "malformed `derive` entry");
+ continue;
+ };
+
+ if !(is_builtin_trait(&tname) || cx.ecfg.enable_custom_derive()) {
feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
"custom_derive",
titem.span,
let arg_idx = match arg_index_consumed.get_mut(i) {
None => 0, // error already emitted elsewhere
Some(offset) => {
- let arg_idx = self.arg_index_map[i][*offset];
+ let ref idx_map = self.arg_index_map[i];
+ // unwrap_or branch: error already emitted elsewhere
+ let arg_idx = *idx_map.get(*offset).unwrap_or(&0);
*offset += 1;
arg_idx
}
}
}
+ pub fn from_span(primary_span: Span) -> MultiSpan {
+ MultiSpan {
+ primary_spans: vec![primary_span],
+ span_labels: vec![]
+ }
+ }
+
+ pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
+ MultiSpan {
+ primary_spans: vec,
+ span_labels: vec![]
+ }
+ }
+
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
- MultiSpan {
- primary_spans: vec![span],
- span_labels: vec![]
- }
+ MultiSpan::from_span(span)
}
}
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(!1);
+// For code generated by a procedural macro, without knowing which
+// macro produced it. Used in `qquote!`.
+pub const PROC_EXPN: ExpnId = ExpnId(!2);
+
impl ExpnId {
pub fn from_u32(id: u32) -> ExpnId {
ExpnId(id)
let target = env::var("TARGET").unwrap();
if target.contains("linux") {
- if target.contains("musl") && (target.contains("x86_64") || target.contains("i686")) {
+ if target.contains("musl") && !target.contains("mips") {
println!("cargo:rustc-link-lib=static=unwind");
} else if !target.contains("android") {
println!("cargo:rustc-link-lib=gcc_s");
#![allow(bad_style)]
-use libc;
-
-#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
-pub use self::_Unwind_Action::*;
-#[cfg(target_arch = "arm")]
-pub use self::_Unwind_State::*;
-pub use self::_Unwind_Reason_Code::*;
-
-#[cfg(any(not(target_arch = "arm"), target_os = "ios"))]
-#[repr(C)]
-#[derive(Clone, Copy)]
-pub enum _Unwind_Action {
- _UA_SEARCH_PHASE = 1,
- _UA_CLEANUP_PHASE = 2,
- _UA_HANDLER_FRAME = 4,
- _UA_FORCE_UNWIND = 8,
- _UA_END_OF_STACK = 16,
+macro_rules! cfg_if {
+ ( $( if #[cfg( $meta:meta )] { $($it1:item)* } else { $($it2:item)* } )* ) =>
+ ( $( $( #[cfg($meta)] $it1)* $( #[cfg(not($meta))] $it2)* )* )
}
-#[cfg(target_arch = "arm")]
-#[repr(C)]
-#[derive(Clone, Copy)]
-pub enum _Unwind_State {
- _US_VIRTUAL_UNWIND_FRAME = 0,
- _US_UNWIND_FRAME_STARTING = 1,
- _US_UNWIND_FRAME_RESUME = 2,
- _US_ACTION_MASK = 3,
- _US_FORCE_UNWIND = 8,
- _US_END_OF_STACK = 16,
-}
+use libc::{c_int, c_void, uintptr_t};
#[repr(C)]
+#[derive(Copy, Clone, PartialEq)]
pub enum _Unwind_Reason_Code {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8,
- _URC_FAILURE = 9, // used only by ARM EABI
+ _URC_FAILURE = 9, // used only by ARM EHABI
}
+pub use self::_Unwind_Reason_Code::*;
pub type _Unwind_Exception_Class = u64;
-
-pub type _Unwind_Word = libc::uintptr_t;
-pub type _Unwind_Ptr = libc::uintptr_t;
-
-pub type _Unwind_Trace_Fn = extern "C" fn(ctx: *mut _Unwind_Context, arg: *mut libc::c_void)
+pub type _Unwind_Word = uintptr_t;
+pub type _Unwind_Ptr = uintptr_t;
+pub type _Unwind_Trace_Fn = extern "C" fn(ctx: *mut _Unwind_Context, arg: *mut c_void)
-> _Unwind_Reason_Code;
-
#[cfg(target_arch = "x86")]
pub const unwinder_private_data_size: usize = 5;
pub type _Unwind_Exception_Cleanup_Fn = extern "C" fn(unwind_code: _Unwind_Reason_Code,
exception: *mut _Unwind_Exception);
-
-#[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
- target_os = "freebsd",
- target_os = "solaris",
- all(target_os = "linux",
- target_env = "musl",
- not(target_arch = "x86"),
- not(target_arch = "x86_64"))),
- link(name = "gcc_s"))]
-#[cfg_attr(all(target_os = "linux",
- target_env = "musl",
- any(target_arch = "x86", target_arch = "x86_64"),
- not(test)),
- link(name = "unwind", kind = "static"))]
-#[cfg_attr(any(target_os = "android", target_os = "openbsd"),
- link(name = "gcc"))]
-#[cfg_attr(all(target_os = "netbsd", not(target_vendor = "rumprun")),
- link(name = "gcc"))]
-#[cfg_attr(all(target_os = "netbsd", target_vendor = "rumprun"),
- link(name = "unwind"))]
-#[cfg_attr(target_os = "dragonfly",
- link(name = "gcc_pic"))]
-#[cfg_attr(target_os = "bitrig",
- link(name = "c++abi"))]
-#[cfg_attr(all(target_os = "windows", target_env = "gnu"),
- link(name = "gcc_eh"))]
-#[cfg(not(cargobuild))]
-extern "C" {}
-
extern "C" {
- // iOS on armv7 uses SjLj exceptions and requires to link
- // against corresponding routine (..._SjLj_...)
- #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
- #[unwind]
- pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
-
- #[cfg(all(target_os = "ios", target_arch = "arm"))]
- #[unwind]
- fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
-
- pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
-
#[unwind]
pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
-
- // No native _Unwind_Backtrace on iOS
- #[cfg(not(all(target_os = "ios", target_arch = "arm")))]
- pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
- trace_argument: *mut libc::c_void)
- -> _Unwind_Reason_Code;
-
- // available since GCC 4.2.0, should be fine for our purpose
- #[cfg(all(not(all(target_os = "android", target_arch = "arm")),
- not(all(target_os = "linux", target_arch = "arm"))))]
- pub fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
- ip_before_insn: *mut libc::c_int)
- -> libc::uintptr_t;
-
- pub fn _Unwind_GetLanguageSpecificData(ctx: *mut _Unwind_Context) -> _Unwind_Ptr;
+ pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
+ pub fn _Unwind_GetLanguageSpecificData(ctx: *mut _Unwind_Context) -> *mut c_void;
pub fn _Unwind_GetRegionStart(ctx: *mut _Unwind_Context) -> _Unwind_Ptr;
pub fn _Unwind_GetTextRelBase(ctx: *mut _Unwind_Context) -> _Unwind_Ptr;
pub fn _Unwind_GetDataRelBase(ctx: *mut _Unwind_Context) -> _Unwind_Ptr;
- pub fn _Unwind_SetGR(ctx: *mut _Unwind_Context, reg_index: libc::c_int, value: _Unwind_Ptr);
- pub fn _Unwind_SetIP(ctx: *mut _Unwind_Context, value: _Unwind_Ptr);
-
- #[cfg(all(not(target_os = "android"),
- not(all(target_os = "linux", target_arch = "arm"))))]
- pub fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void) -> *mut libc::c_void;
}
-// ... and now we just providing access to SjLj counterspart
-// through a standard name to hide those details from others
-// (see also comment above regarding _Unwind_RaiseException)
-#[cfg(all(target_os = "ios", target_arch = "arm"))]
-#[inline]
-pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception) -> _Unwind_Reason_Code {
- _Unwind_SjLj_RaiseException(exc)
-}
+cfg_if! {
+if #[cfg(not(any(all(target_os = "android", target_arch = "arm"),
+ all(target_os = "linux", target_arch = "arm"))))] {
+ // Not ARM EHABI
+ #[repr(C)]
+ #[derive(Copy, Clone, PartialEq)]
+ pub enum _Unwind_Action {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16,
+ }
+ pub use self::_Unwind_Action::*;
+
+ extern "C" {
+ pub fn _Unwind_GetGR(ctx: *mut _Unwind_Context, reg_index: c_int) -> _Unwind_Word;
+ pub fn _Unwind_SetGR(ctx: *mut _Unwind_Context, reg_index: c_int, value: _Unwind_Word);
+ pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> _Unwind_Word;
+ pub fn _Unwind_SetIP(ctx: *mut _Unwind_Context, value: _Unwind_Word);
+ pub fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context, ip_before_insn: *mut c_int)
+ -> _Unwind_Word;
+ pub fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void;
+ }
+
+} else {
+ // ARM EHABI
+ #[repr(C)]
+ #[derive(Copy, Clone, PartialEq)]
+ pub enum _Unwind_State {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16,
+ }
+ pub use self::_Unwind_State::*;
-// On android, the function _Unwind_GetIP is a macro, and this is the
-// expansion of the macro. This is all copy/pasted directly from the
-// header file with the definition of _Unwind_GetIP.
-#[cfg(any(all(target_os = "android", target_arch = "arm"),
- all(target_os = "linux", target_arch = "arm")))]
-pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
#[repr(C)]
enum _Unwind_VRS_Result {
_UVRSR_OK = 0,
_UVRSC_WMMXD = 3,
_UVRSC_WMMXC = 4,
}
+ use self::_Unwind_VRS_RegClass::*;
#[repr(C)]
enum _Unwind_VRS_DataRepresentation {
_UVRSD_UINT32 = 0,
_UVRSD_FLOAT = 4,
_UVRSD_DOUBLE = 5,
}
+ use self::_Unwind_VRS_DataRepresentation::*;
+
+ pub const UNWIND_POINTER_REG: c_int = 12;
+ pub const UNWIND_IP_REG: c_int = 15;
- type _Unwind_Word = libc::c_uint;
extern "C" {
fn _Unwind_VRS_Get(ctx: *mut _Unwind_Context,
- klass: _Unwind_VRS_RegClass,
- word: _Unwind_Word,
+ regclass: _Unwind_VRS_RegClass,
+ regno: _Unwind_Word,
+ repr: _Unwind_VRS_DataRepresentation,
+ data: *mut c_void)
+ -> _Unwind_VRS_Result;
+
+ fn _Unwind_VRS_Set(ctx: *mut _Unwind_Context,
+ regclass: _Unwind_VRS_RegClass,
+ regno: _Unwind_Word,
repr: _Unwind_VRS_DataRepresentation,
- data: *mut libc::c_void)
+ data: *mut c_void)
-> _Unwind_VRS_Result;
}
- let mut val: _Unwind_Word = 0;
- let ptr = &mut val as *mut _Unwind_Word;
- let _ = _Unwind_VRS_Get(ctx,
- _Unwind_VRS_RegClass::_UVRSC_CORE,
- 15,
- _Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
- ptr as *mut libc::c_void);
- (val & !1) as libc::uintptr_t
-}
+ // On Android or ARM/Linux, these are implemented as macros:
+
+ pub unsafe fn _Unwind_GetGR(ctx: *mut _Unwind_Context, reg_index: c_int) -> _Unwind_Word {
+ let mut val: _Unwind_Word = 0;
+ _Unwind_VRS_Get(ctx, _UVRSC_CORE, reg_index as _Unwind_Word, _UVRSD_UINT32,
+ &mut val as *mut _ as *mut c_void);
+ val
+ }
-// This function doesn't exist on Android or ARM/Linux, so make it same
-// to _Unwind_GetIP
-#[cfg(any(all(target_os = "android", target_arch = "arm"),
- all(target_os = "linux", target_arch = "arm")))]
-pub unsafe fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
- ip_before_insn: *mut libc::c_int)
- -> libc::uintptr_t {
- *ip_before_insn = 0;
- _Unwind_GetIP(ctx)
+ pub unsafe fn _Unwind_SetGR(ctx: *mut _Unwind_Context, reg_index: c_int, value: _Unwind_Word) {
+ let mut value = value;
+ _Unwind_VRS_Set(ctx, _UVRSC_CORE, reg_index as _Unwind_Word, _UVRSD_UINT32,
+ &mut value as *mut _ as *mut c_void);
+ }
+
+ pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context)
+ -> _Unwind_Word {
+ let val = _Unwind_GetGR(ctx, UNWIND_IP_REG);
+ (val & !1) as _Unwind_Word
+ }
+
+ pub unsafe fn _Unwind_SetIP(ctx: *mut _Unwind_Context,
+ value: _Unwind_Word) {
+ // Propagate thumb bit to instruction pointer
+ let thumb_state = _Unwind_GetGR(ctx, UNWIND_IP_REG) & 1;
+ let value = value | thumb_state;
+ _Unwind_SetGR(ctx, UNWIND_IP_REG, value);
+ }
+
+ pub unsafe fn _Unwind_GetIPInfo(ctx: *mut _Unwind_Context,
+ ip_before_insn: *mut c_int)
+ -> _Unwind_Word {
+ *ip_before_insn = 0;
+ _Unwind_GetIP(ctx)
+ }
+
+ // This function also doesn't exist on Android or ARM/Linux, so make it a no-op
+ pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void {
+ pc
+ }
}
-// This function also doesn't exist on Android or ARM/Linux, so make it
-// a no-op
-#[cfg(any(target_os = "android",
- all(target_os = "linux", target_arch = "arm")))]
-pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut libc::c_void) -> *mut libc::c_void {
- pc
+if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] {
+ // Not 32-bit iOS
+ extern "C" {
+ #[unwind]
+ pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
+ pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
+ trace_argument: *mut c_void)
+ -> _Unwind_Reason_Code;
+ }
+} else {
+ // 32-bit iOS uses SjLj and does not provide _Unwind_Backtrace()
+ extern "C" {
+ #[unwind]
+ pub fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
+ }
+
+ #[inline]
+ pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception) -> _Unwind_Reason_Code {
+ _Unwind_SjLj_RaiseException(exc)
+ }
}
+} // cfg_if!
+
+#[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
+ target_os = "freebsd",
+ target_os = "solaris",
+ all(target_os = "linux",
+ target_env = "musl",
+ not(target_arch = "x86"),
+ not(target_arch = "x86_64"))),
+ link(name = "gcc_s"))]
+#[cfg_attr(all(target_os = "linux",
+ target_env = "musl",
+ any(target_arch = "x86", target_arch = "x86_64"),
+ not(test)),
+ link(name = "unwind", kind = "static"))]
+#[cfg_attr(any(target_os = "android", target_os = "openbsd"),
+ link(name = "gcc"))]
+#[cfg_attr(all(target_os = "netbsd", not(target_vendor = "rumprun")),
+ link(name = "gcc"))]
+#[cfg_attr(all(target_os = "netbsd", target_vendor = "rumprun"),
+ link(name = "unwind"))]
+#[cfg_attr(target_os = "dragonfly",
+ link(name = "gcc_pic"))]
+#[cfg_attr(target_os = "bitrig",
+ link(name = "c++abi"))]
+#[cfg_attr(all(target_os = "windows", target_env = "gnu"),
+ link(name = "gcc_eh"))]
+#[cfg(not(cargobuild))]
+extern "C" {}
-Subproject commit 7ca76af03bb04659562890d6b4f223fffe0d748f
+Subproject commit d1cc48989b13780f21c408fef17dceb104a09c9d
dependencies = [
"build_helper 0.1.0",
"core 0.0.0",
- "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.0.0",
]
[[package]]
name = "gcc"
-version = "0.3.26"
+version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
"build_helper 0.1.0",
"collections 0.0.0",
"core 0.0.0",
- "gcc 0.3.26 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.0.0",
"panic_abort 0.0.0",
"panic_unwind 0.0.0",
[features]
jemalloc = ["std/jemalloc"]
debug-jemalloc = ["std/debug-jemalloc"]
+backtrace = ["std/backtrace"]
return nullptr;
}
+#if LLVM_VERSION_MINOR <= 8
ErrorOr<std::unique_ptr<Archive>> archive_or =
+#else
+ Expected<std::unique_ptr<Archive>> archive_or =
+#endif
Archive::create(buf_or.get()->getMemBufferRef());
if (!archive_or) {
+#if LLVM_VERSION_MINOR <= 8
LLVMRustSetLastError(archive_or.getError().message().c_str());
+#else
+ LLVMRustSetLastError(toString(archive_or.takeError()).c_str());
+#endif
return nullptr;
}
struct RustArchiveIterator {
Archive::child_iterator cur;
Archive::child_iterator end;
+#if LLVM_VERSION_MINOR >= 9
+ Error err;
+#endif
};
extern "C" RustArchiveIterator*
LLVMRustArchiveIteratorNew(RustArchive *ra) {
Archive *ar = ra->getBinary();
RustArchiveIterator *rai = new RustArchiveIterator();
+#if LLVM_VERSION_MINOR <= 8
rai->cur = ar->child_begin();
+#else
+ rai->cur = ar->child_begin(rai->err);
+ if (rai->err) {
+ LLVMRustSetLastError(toString(std::move(rai->err)).c_str());
+ return NULL;
+ }
+#endif
rai->end = ar->child_end();
return rai;
}
extern "C" const Archive::Child*
LLVMRustArchiveIteratorNext(RustArchiveIterator *rai) {
+#if LLVM_VERSION_MINOR >= 9
+ if (rai->err) {
+ LLVMRustSetLastError(toString(std::move(rai->err)).c_str());
+ return NULL;
+ }
+#endif
if (rai->cur == rai->end)
return NULL;
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_MINOR == 8
const ErrorOr<Archive::Child>* cur = rai->cur.operator->();
if (!*cur) {
LLVMRustSetLastError(cur->getError().message().c_str());
const LLVMRustArchiveMember **NewMembers,
bool WriteSymbtab,
Archive::Kind Kind) {
+
+#if LLVM_VERSION_MINOR <= 8
std::vector<NewArchiveIterator> Members;
+#else
+ std::vector<NewArchiveMember> Members;
+#endif
for (size_t i = 0; i < NumMembers; i++) {
auto Member = NewMembers[i];
assert(Member->name);
if (Member->filename) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_MINOR >= 9
+ Expected<NewArchiveMember> MOrErr = NewArchiveMember::getFile(Member->filename, true);
+ if (!MOrErr) {
+ LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
+ return -1;
+ }
+ Members.push_back(std::move(*MOrErr));
+#elif LLVM_VERSION_MINOR == 8
Members.push_back(NewArchiveIterator(Member->filename));
#else
Members.push_back(NewArchiveIterator(Member->filename, Member->name));
#endif
} else {
+#if LLVM_VERSION_MINOR <= 8
Members.push_back(NewArchiveIterator(Member->child, Member->name));
+#else
+ Expected<NewArchiveMember> MOrErr = NewArchiveMember::getOldMember(Member->child, true);
+ if (!MOrErr) {
+ LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
+ return -1;
+ }
+ Members.push_back(std::move(*MOrErr));
+#endif
}
}
#if LLVM_VERSION_MINOR >= 8
const char *cpu,
const char *feature,
CodeModel::Model CM,
- Reloc::Model RM,
+ LLVMRelocMode Reloc,
CodeGenOpt::Level OptLevel,
bool UseSoftFloat,
bool PositionIndependentExecutable,
bool FunctionSections,
bool DataSections) {
+
+#if LLVM_VERSION_MINOR <= 8
+ Reloc::Model RM;
+#else
+ Optional<Reloc::Model> RM;
+#endif
+ switch (Reloc){
+ case LLVMRelocStatic:
+ RM = Reloc::Static;
+ break;
+ case LLVMRelocPIC:
+ RM = Reloc::PIC_;
+ break;
+ case LLVMRelocDynamicNoPic:
+ RM = Reloc::DynamicNoPIC;
+ break;
+ default:
+#if LLVM_VERSION_MINOR <= 8
+ RM = Reloc::Default;
+#endif
+ break;
+ }
+
std::string Error;
Triple Trip(Triple::normalize(triple));
const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Trip.getTriple(),
}
TargetOptions Options;
+#if LLVM_VERSION_MINOR <= 8
Options.PositionIndependentExecutable = PositionIndependentExecutable;
+#endif
+
Options.FloatABIType = FloatABI::Default;
if (UseSoftFloat) {
Options.FloatABIType = FloatABI::Soft;
// similar code in clang's BackendUtil.cpp file.
extern "C" void
LLVMRustRunFunctionPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
- FunctionPassManager *P = unwrap<FunctionPassManager>(PM);
+ llvm::legacy::FunctionPassManager *P = unwrap<llvm::legacy::FunctionPassManager>(PM);
P->doInitialization();
for (Module::iterator I = unwrap(M)->begin(),
E = unwrap(M)->end(); I != E; ++I)
LLVMModuleRef M,
const char *path,
TargetMachine::CodeGenFileType FileType) {
- PassManager *PM = unwrap<PassManager>(PMR);
+ llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
std::string ErrorInfo;
std::error_code EC;
LLVMRustPrintModule(LLVMPassManagerRef PMR,
LLVMModuleRef M,
const char* path) {
- PassManager *PM = unwrap<PassManager>(PMR);
+ llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
std::string ErrorInfo;
std::error_code EC;
extern "C" void
LLVMRustRunRestrictionPass(LLVMModuleRef M, char **symbols, size_t len) {
- PassManager passes;
+ llvm::legacy::PassManager passes;
+
+#if LLVM_VERSION_MINOR <= 8
ArrayRef<const char*> ref(symbols, len);
passes.add(llvm::createInternalizePass(ref));
+#else
+ auto PreserveFunctions = [=](const GlobalValue &GV) {
+ for (size_t i=0; i<len; i++) {
+ if (GV.getName() == symbols[i]) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ passes.add(llvm::createInternalizePass(PreserveFunctions));
+#endif
+
passes.run(*unwrap(M));
}
LLVMRustGetModuleDataLayout(LLVMModuleRef M) {
return wrap(&unwrap(M)->getDataLayout());
}
+
+extern "C" void
+LLVMRustSetModulePIELevel(LLVMModuleRef M) {
+#if LLVM_VERSION_MINOR >= 9
+ unwrap(M)->setPIELevel(PIELevel::Level::Large);
+#endif
+}
return wrap(Type::getMetadataTy(*unwrap(C)));
}
-extern "C" void LLVMAddCallSiteAttribute(LLVMValueRef Instr, unsigned index, uint64_t Val) {
+extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned index, uint64_t Val) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
AttrBuilder B;
B.addRawValue(Val);
return wrap(unwrap(B)->Insert(si));
}
-extern "C" LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B,
+extern "C" LLVMValueRef LLVMRustBuildAtomicCmpXchg(LLVMBuilderRef B,
LLVMValueRef target,
LLVMValueRef old,
LLVMValueRef source,
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2016-06-23
+2016-07-25b
#![feature(plugin)]
#![plugin(lint_plugin_test)]
#![forbid(test_lint)]
+//~^ NOTE lint level defined here
+//~| NOTE `forbid` lint level set here
fn lintme() { } //~ ERROR item is named 'lintme'
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern "rust-call" { fn foo(x: u8, ...); } //~ ERROR E0045
+extern "Rust" { fn foo(x: u8, ...); } //~ ERROR E0045
fn main() {
}
let _x: i32 = [1, 2, 3];
//~^ ERROR mismatched types
//~| expected type `i32`
- //~| found type `[_; 3]`
+ //~| found type `[{integer}; 3]`
//~| expected i32, found array of 3 elements
let x: &[i32] = &[1, 2, 3];
#![allow(dead_code)]
#![feature(rustc_attrs)]
-#![feature(unboxed_closures)]
#![deny(hr_lifetime_in_assoc_type)]
trait Foo<'a> {
#![allow(dead_code, unused_variables)]
#![deny(hr_lifetime_in_assoc_type)]
-#![feature(unboxed_closures)]
use std::str::Chars;
static i: String = 10;
//~^ ERROR mismatched types
//~| expected type `std::string::String`
-//~| found type `_`
+//~| found type `{integer}`
//~| expected struct `std::string::String`, found integral variable
fn main() { println!("{}", i); }
// Ensure that invoking a closure counts as a unique immutable borrow
-#![feature(unboxed_closures)]
-
type Fn<'a> = Box<FnMut() + 'a>;
struct Test<'a> {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(overloaded_calls, unboxed_closures)]
-
fn a<F:Fn(isize, isize) -> isize>(mut f: F) {
let g = &mut f;
f(1, 2); //~ ERROR cannot borrow `f` as immutable
f(&x);
//~^ ERROR mismatched types
//~| expected type `&mut i32`
- //~| found type `&_`
+ //~| found type `&{integer}`
//~| values differ in mutability
}
let _: &[i32] = [0];
//~^ ERROR mismatched types
//~| expected type `&[i32]`
- //~| found type `[_; 1]`
+ //~| found type `[{integer}; 1]`
//~| expected &-ptr, found array of 1 elements
}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-extern "vectorcall" { //~ ERROR vectorcall is experimental and subject to change
- fn bar();
-}
-
-extern "vectorcall" fn baz() { //~ ERROR vectorcall is experimental and subject to change
-}
-
-fn main() {
-}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Functions
+extern "rust-intrinsic" fn f1() {} //~ ERROR intrinsics are subject to change
+extern "platform-intrinsic" fn f2() {} //~ ERROR platform intrinsics are experimental
+extern "vectorcall" fn f3() {} //~ ERROR vectorcall is experimental and subject to change
+extern "rust-call" fn f4() {} //~ ERROR rust-call ABI is subject to change
+
+// Methods in trait definition
+trait Tr {
+ extern "rust-intrinsic" fn m1(); //~ ERROR intrinsics are subject to change
+ extern "platform-intrinsic" fn m2(); //~ ERROR platform intrinsics are experimental
+ extern "vectorcall" fn m3(); //~ ERROR vectorcall is experimental and subject to change
+ extern "rust-call" fn m4(); //~ ERROR rust-call ABI is subject to change
+
+ extern "rust-intrinsic" fn dm1() {} //~ ERROR intrinsics are subject to change
+ extern "platform-intrinsic" fn dm2() {} //~ ERROR platform intrinsics are experimental
+ extern "vectorcall" fn dm3() {} //~ ERROR vectorcall is experimental and subject to change
+ extern "rust-call" fn dm4() {} //~ ERROR rust-call ABI is subject to change
+}
+
+struct S;
+
+// Methods in trait impl
+impl Tr for S {
+ extern "rust-intrinsic" fn m1() {} //~ ERROR intrinsics are subject to change
+ extern "platform-intrinsic" fn m2() {} //~ ERROR platform intrinsics are experimental
+ extern "vectorcall" fn m3() {} //~ ERROR vectorcall is experimental and subject to change
+ extern "rust-call" fn m4() {} //~ ERROR rust-call ABI is subject to change
+}
+
+// Methods in inherent impl
+impl S {
+ extern "rust-intrinsic" fn im1() {} //~ ERROR intrinsics are subject to change
+ extern "platform-intrinsic" fn im2() {} //~ ERROR platform intrinsics are experimental
+ extern "vectorcall" fn im3() {} //~ ERROR vectorcall is experimental and subject to change
+ extern "rust-call" fn im4() {} //~ ERROR rust-call ABI is subject to change
+}
+
+// Function pointer types
+type A1 = extern "rust-intrinsic" fn(); //~ ERROR intrinsics are subject to change
+type A2 = extern "platform-intrinsic" fn(); //~ ERROR platform intrinsics are experimental
+type A3 = extern "vectorcall" fn(); //~ ERROR vectorcall is experimental and subject to change
+type A4 = extern "rust-call" fn(); //~ ERROR rust-call ABI is subject to change
+
+// Foreign modules
+extern "rust-intrinsic" {} //~ ERROR intrinsics are subject to change
+extern "platform-intrinsic" {} //~ ERROR platform intrinsics are experimental
+extern "vectorcall" {} //~ ERROR vectorcall is experimental and subject to change
+extern "rust-call" {} //~ ERROR rust-call ABI is subject to change
+
+fn main() {}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-extern "rust-call" fn foo() { } //~ ERROR rust-call ABI is subject to change
-
-trait Foo {
- extern "rust-call" fn foo();
-}
-
-impl Foo for i32 {
- extern "rust-call" fn foo() { } //~ ERROR rust-call ABI is subject to change
-}
-
-fn main() { }
#![allow(dead_code)]
fn foo<F: Fn()>(mut f: F) {
- f.call(()); //~ ERROR explicit use of unboxed closure method `call`
- f.call_mut(()); //~ ERROR explicit use of unboxed closure method `call_mut`
- f.call_once(()); //~ ERROR explicit use of unboxed closure method `call_once`
+ f.call(()); //~ ERROR use of unstable library feature 'fn_traits'
+ f.call_mut(()); //~ ERROR use of unstable library feature 'fn_traits'
+ f.call_once(()); //~ ERROR use of unstable library feature 'fn_traits'
}
fn main() {}
#![allow(dead_code)]
-fn foo<F: Fn()>(mut f: F, mut g: F) {
- Fn::call(&g, ()); //~ ERROR explicit use of unboxed closure method `call`
- FnMut::call_mut(&mut g, ()); //~ ERROR explicit use of unboxed closure method `call_mut`
- FnOnce::call_once(g, ()); //~ ERROR explicit use of unboxed closure method `call_once`
+fn foo<F: Fn()>(mut f: F) {
+ Fn::call(&f, ()); //~ ERROR use of unstable library feature 'fn_traits'
+ FnMut::call_mut(&mut f, ()); //~ ERROR use of unstable library feature 'fn_traits'
+ FnOnce::call_once(f, ()); //~ ERROR use of unstable library feature 'fn_traits'
}
fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
#![feature(box_syntax)]
fn needs_fn<F>(x: F) where F: Fn(isize) -> isize {}
x = 5;
//~^ ERROR mismatched types
//~| expected type `std::option::Option<usize>`
- //~| found type `_`
+ //~| found type `{integer}`
//~| expected enum `std::option::Option`, found integral variable
}
if let Some(b) = None { //~ ERROR: `if let` arms have incompatible types
//~^ expected (), found integral variable
//~| expected type `()`
- //~| found type `_`
+ //~| found type `{integer}`
()
} else { //~ NOTE: `if let` arm with an incompatible type
1
//~^ ERROR invalid reference to argument `0` (no arguments given)
//~^^ ERROR invalid reference to argument `1` (no arguments given)
+ // bad named arguments, #35082
+
+ format!("{valuea} {valueb}", valuea=5, valuec=7);
+ //~^ ERROR there is no argument named `valueb`
+ //~^^ ERROR named argument never used
+
// bad syntax of the format string
format!("{"); //~ ERROR: expected `'}'` but string was terminated
fn main() {
fn bar<T>(_: T) {}
- [0][0u8]; //~ ERROR: `[_]: std::ops::Index<u8>` is not satisfied
+ [0][0u8]; //~ ERROR: `[{integer}]: std::ops::Index<u8>` is not satisfied
[0][0]; // should infer to be a usize
let mut x = 2;
x = 5.0;
//~^ ERROR mismatched types
- //~| expected type `_`
- //~| found type `_`
+ //~| expected type `{integer}`
+ //~| found type `{float}`
//~| expected integral variable, found floating-point variable
}
let _x: usize = match Some(1) {
Ok(u) => u,
//~^ ERROR mismatched types
- //~| expected type `std::option::Option<_>`
+ //~| expected type `std::option::Option<{integer}>`
//~| found type `std::result::Result<_, _>`
//~| expected enum `std::option::Option`, found enum `std::result::Result`
Err(e) => panic!(e)
//~^ ERROR mismatched types
- //~| expected type `std::option::Option<_>`
+ //~| expected type `std::option::Option<{integer}>`
//~| found type `std::result::Result<_, _>`
//~| expected enum `std::option::Option`, found enum `std::result::Result`
};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(overloaded_calls, unboxed_closures)]
-
// Make sure we don't ICE when making an overloaded call with the
// wrong arity.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(overloaded_calls)]
-
fn f<'r>(p: &'r mut fn(p: &mut ())) {
(*p)(()) //~ ERROR mismatched types
//~| expected type `&mut ()`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
pub fn foo<'a, F: Fn(&'a ())>(bar: F) {
bar.call((
&(), //~ ERROR borrowed value does not live long enough
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::marker;
struct B<T>(marker::PhantomData<T>);
fn main() {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
(|| Box::new(*(&[0][..])))();
- //~^ ERROR `[_]: std::marker::Sized` is not satisfied
+ //~^ ERROR `[{integer}]: std::marker::Sized` is not satisfied
}
// when a type error or unconstrained type variable propagates
// into it.
-#![feature(unboxed_closures)]
-
fn main() {
(return)((),());
//~^ ERROR the type of this value must be known
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
fn main() {
"".homura()(); //~ ERROR no method named `homura` found
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
#![allow(dead_code)]
type foo = fn(&u8, &u8) -> &u8; //~ ERROR missing lifetime specifier
fn main() {
if let Some(homura) = Some("madoka") { //~ ERROR missing an else clause
//~| expected type `()`
- //~| found type `_`
+ //~| found type `{integer}`
//~| expected (), found integral variable
765
};
macro_rules! macro_panic {
($not_a_function:expr, $some_argument:ident) => {
$not_a_function($some_argument)
- //~^ ERROR expected function, found `_`
+ //~^ ERROR expected function, found `{integer}`
}
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This is a regression test for a problem encountered around upvar
+// inference and trait caching: in particular, we were entering a
+// temporary closure kind during inference, and then caching results
+// based on that temporary kind, which led to no error being reported
+// in this particular test.
+
+fn main() {
+ let inc = || {};
+ inc();
+
+ fn apply<F>(f: F) where F: Fn() {
+ f()
+ }
+
+ let mut farewell = "goodbye".to_owned();
+ let diary = || { //~ ERROR E0525
+ farewell.push_str("!!!");
+ println!("Then I screamed {}.", farewell);
+ };
+
+ apply(diary);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Bar<T> {
+ inner: Foo<T> //~ ERROR type name `Foo` is undefined or not in scope
+}
+
+enum Baz<T> {
+ Foo(Foo<T>) //~ ERROR type name `Foo` is undefined or not in scope
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+
+pub trait MethodType {
+ type GetProp: ?Sized;
+}
+
+pub struct MTFn;
+
+impl<'a> MethodType for MTFn { //~ ERROR E0207
+ type GetProp = fmt::Debug + 'a;
+}
+
+fn bad(a: Box<<MTFn as MethodType>::GetProp>) -> Box<fmt::Debug+'static> {
+ a
+}
+
+fn dangling(a: &str) -> Box<fmt::Debug> {
+ bad(Box::new(a))
+}
+
+fn main() {
+ let mut s = "hello".to_string();
+ let x = dangling(&s);
+ s = String::new();
+ println!("{:?}", x);
+}
} else if false {
//~^ ERROR if may be missing an else clause
//~| expected type `()`
-//~| found type `_`
+//~| found type `{integer}`
//~| expected (), found integral variable
1
};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
fn id<T>(t: T) -> T { t }
fn f<'r, T>(v: &'r T) -> Box<FnMut() -> T + 'r> {
fn main() {
match 42 { A => () }
//~^ ERROR mismatched types
- //~| expected type `_`
+ //~| expected type `{integer}`
//~| found type `(isize, isize)`
//~| expected integral variable, found tuple
}
match &Some(42) {
Some(x) => (),
//~^ ERROR mismatched types
- //~| expected type `&std::option::Option<_>`
+ //~| expected type `&std::option::Option<{integer}>`
//~| found type `std::option::Option<_>`
//~| expected &-ptr, found enum `std::option::Option`
None => ()
//~^ ERROR mismatched types
- //~| expected type `&std::option::Option<_>`
+ //~| expected type `&std::option::Option<{integer}>`
//~| found type `std::option::Option<_>`
//~| expected &-ptr, found enum `std::option::Option`
}
fn main() {
let x: Box<_> = box 3;
take_param(&x);
- //~^ ERROR `Box<_>: std::marker::Copy` is not satisfied
+ //~^ ERROR `Box<{integer}>: std::marker::Copy` is not satisfied
}
// except according to those terms.
#![forbid(deprecated)]
+//~^ NOTE `forbid` lint level set here
#[allow(deprecated)] //~ ERROR allow(deprecated) overruled by outer forbid(deprecated)
fn main() {
10 ... "what" => ()
};
//~^^ ERROR only char and numeric types are allowed in range
- //~| start type: _
+ //~| start type: {integer}
//~| end type: &'static str
match 5 {
_ => { }
};
//~^^^ ERROR mismatched types
- //~| expected type `_`
+ //~| expected type `{integer}`
//~| found type `char`
}
};
match &[0, 1, 2] {
- [..] => {} //~ ERROR expected an array or slice, found `&[_; 3]`
+ [..] => {} //~ ERROR expected an array or slice, found `&[{integer}; 3]`
};
match &[0, 1, 2] {
//~| expected &-ptr, found struct `Foo`
Foo::bar(&42); //~ ERROR mismatched types
//~| expected type `&Foo`
- //~| found type `&_`
+ //~| found type `&{integer}`
//~| expected struct `Foo`, found integral variable
}
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
-#![feature(unboxed_closures)]
-
struct R<'a> {
// This struct is needed to create the
// otherwise infinite type of a fn that
// (separate lines to ensure the spans are accurate)
let &_ //~ ERROR mismatched types
- //~| expected type `&mut _`
+ //~| expected type `&mut {integer}`
//~| found type `&_`
//~| values differ in mutability
= foo;
let bar = &1;
let &_ = bar;
let &mut _ //~ ERROR mismatched types
- //~| expected type `&_`
+ //~| expected type `&{integer}`
//~| found type `&mut _`
//~| values differ in mutability
= bar;
fn main() {
let x = Rc::new(5);
bar(x);
- //~^ ERROR `std::rc::Rc<_>: std::marker::Send` is not satisfied
+ //~^ ERROR `std::rc::Rc<{integer}>: std::marker::Send` is not satisfied
}
}
fn test_single1() {
- use foo1::Bar; //~ ERROR function `Bar` is private
+ use foo1::Bar;
- Bar();
+ Bar(); //~ ERROR unresolved name `Bar`
}
fn test_list1() {
- use foo1::{Bar,Baz}; //~ ERROR `Bar` is private
+ use foo1::{Bar,Baz};
- Bar();
+ Bar(); //~ ERROR unresolved name `Bar`
}
// private type, public value
}
fn test_single2() {
- use foo2::Bar; //~ ERROR trait `Bar` is private
+ use foo2::Bar;
- let _x : Box<Bar>;
+ let _x : Box<Bar>; //~ ERROR type name `Bar` is undefined
}
fn test_list2() {
- use foo2::{Bar,Baz}; //~ ERROR `Bar` is private
+ use foo2::{Bar,Baz};
- let _x: Box<Bar>;
+ let _x: Box<Bar>; //~ ERROR type name `Bar` is undefined
}
// neither public
// Unsized type.
let arr: &[_] = &[1, 2, 3];
let range = *arr..;
- //~^ ERROR `[_]: std::marker::Sized` is not satisfied
+ //~^ ERROR `[{integer}]: std::marker::Sized` is not satisfied
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures, overloaded_calls)]
-
use std::ops::FnMut;
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
fn with_int(f: &mut FnMut(&isize)) {
}
// Test that closures cannot subvert aliasing restrictions
-#![feature(overloaded_calls, unboxed_closures)]
-
fn main() {
// Unboxed closure case
{
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
struct closure_box<'a> {
cl: Box<FnMut() + 'a>,
}
let d = [0; 0.5];
//~^ ERROR mismatched types
//~| expected type `usize`
- //~| found type `_`
+ //~| found type `{float}`
//~| expected usize, found floating-point variable
//~| ERROR expected usize for repeat count, found float [E0306]
let e = [0; "foo"];
// scope (in this case, the enum).
trait TraitA<A> {
- fn outer(self) {
+ fn outer(&self) {
enum Foo<B> {
- //~^ ERROR parameter `B` is never used
Variance(A)
//~^ ERROR can't use type parameters from outer function
}
}
trait TraitB<A> {
- fn outer(self) {
+ fn outer(&self) {
struct Foo<B>(A);
//~^ ERROR can't use type parameters from outer function
- //~^^ ERROR parameter `B` is never used
}
}
trait TraitC<A> {
- fn outer(self) {
+ fn outer(&self) {
struct Foo<B> { a: A }
//~^ ERROR can't use type parameters from outer function
- //~^^ ERROR parameter `B` is never used
}
}
trait TraitD<A> {
- fn outer(self) {
+ fn outer(&self) {
fn foo<B>(a: A) { }
//~^ ERROR can't use type parameters from outer function
}
match Foo(1.1, marker::PhantomData) {
1 => {}
//~^ ERROR mismatched types
- //~| expected type `Foo<_, _>`
- //~| found type `_`
+ //~| expected type `Foo<{float}, _>`
+ //~| found type `{integer}`
//~| expected struct `Foo`, found integral variable
}
pub fn main() {
let s: &str = "hello";
- let c: u8 = s[4]; //~ ERROR `str: std::ops::Index<_>` is not satisfied
+ let c: u8 = s[4]; //~ ERROR `str: std::ops::Index<{integer}>` is not satisfied
}
//~| expected struct `Foo`, found struct `Bar`
let f__isize = Foo { a: 2, ..4 }; //~ ERROR mismatched types
//~| expected type `Foo`
- //~| found type `_`
+ //~| found type `{integer}`
//~| expected struct `Foo`, found integral variable
}
//~| expected struct `Foo`, found struct `Bar`
static foo_i: Foo = Foo { a: 2, ..4 }; //~ ERROR mismatched types
//~| expected type `Foo`
- //~| found type `_`
+ //~| found type `{integer}`
//~| expected struct `Foo`, found integral variable
fn main() {
fn main() {
is_ee(4);
- //~^ ERROR overflow evaluating the requirement `_: Tweedle
+ //~^ ERROR overflow evaluating the requirement `{integer}: Tweedle
}
let y = first ((1,2.0,3));
//~^ ERROR mismatched types
//~| expected type `(isize, f64)`
- //~| found type `(isize, f64, _)`
+ //~| found type `(isize, f64, {integer})`
//~| expected a tuple with 2 elements, found one with 3 elements
let y = first ((1,));
tuple.0;
tuple.1;
tuple.2;
- //~^ ERROR attempted out-of-bounds tuple index `2` on type `(_, _)`
+ //~^ ERROR attempted out-of-bounds tuple index `2` on type `({integer}, {integer})`
}
fn main() { let a: bool = 1; let b: i32 = true; }
//~^ ERROR mismatched types
//~| expected type `bool`
-//~| found type `_`
+//~| found type `{integer}`
//~| expected bool, found integral variable
//~| ERROR mismatched types
//~| expected i32, found bool
fn main() {
let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0)});
test(us);
- //~^ ERROR `std::cell::UnsafeCell<MySync<_>>: std::marker::Sync` is not satisfied
+ //~^ ERROR `std::cell::UnsafeCell<MySync<{integer}>>: std::marker::Sync` is not satisfied
let uns = UnsafeCell::new(NoSync);
test(uns);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
// Test that even unboxed closures that are capable of mutating their
// environment cannot mutate captured variables that have not been
// declared mutable (#18335)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
// Test that an unboxed closure that captures a free variable by
// reference cannot escape the region of that variable.
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
fn f<F:Nonexist(isize) -> isize>(x: F) {} //~ ERROR trait `Nonexist` is not in scope
type Typedef = isize;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
// Test that an unboxed closure that mutates a free variable will
// cause borrow conflicts.
// That a closure whose expected argument types include two distinct
// bound regions.
-#![feature(unboxed_closures)]
-
use std::cell::Cell;
fn doit<T,F>(val: T, f: &F)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
fn main() {
let mut zero = || {};
let () = zero.call_mut(());
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
pub fn main() {
// Tests that unsafe extern fn pointers do not implement any Fn traits.
-#![feature(unboxed_closures)]
-
use std::ops::{Fn,FnMut,FnOnce};
unsafe fn square(x: &isize) -> isize { (*x) * (*x) }
// Tests that unsafe extern fn pointers do not implement any Fn traits.
-#![feature(unboxed_closures)]
-
use std::ops::{Fn,FnMut,FnOnce};
extern "C" fn square(x: &isize) -> isize { (*x) * (*x) }
// Tests that unsafe extern fn pointers do not implement any Fn traits.
-#![feature(unboxed_closures)]
-
use std::ops::{Fn,FnMut,FnOnce};
unsafe fn square(x: isize) -> isize { x * x }
fn call_it<B:TraitB>(b: B) -> isize {
let y = 4;
- b.gimme_an_a(y) //~ ERROR `_: TraitA` is not satisfied
+ b.gimme_an_a(y) //~ ERROR `{integer}: TraitA` is not satisfied
}
fn main() {
// lldb-check:[...]$2 = 5
#![allow(unused_variables)]
-#![feature(unboxed_closures, box_syntax)]
+#![feature(box_syntax)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// lldb-command:print *owned
// lldb-check:[...]$9 = 6
-#![feature(unboxed_closures, box_syntax)]
+#![feature(box_syntax)]
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type="rlib"]
+
+#[cfg(rpass1)]
+pub type X = u32;
+
+#[cfg(rpass2)]
+pub type X = i32;
+
+// this version doesn't actually change anything:
+#[cfg(rpass3)]
+pub type X = i32;
+
+pub type Y = char;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Same test as `type_alias_cross_crate`, but with
+// `no-prefer-dynamic`, ensuring that we test what happens when we
+// build rlibs (before we were only testing dylibs, which meant we
+// didn't realize we had to preserve a `bc` file as well).
+
+// aux-build:a.rs
+// revisions:rpass1 rpass2 rpass3
+// no-prefer-dynamic
+
+
+#![feature(rustc_attrs)]
+
+extern crate a;
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+#[rustc_clean(label="TypeckItemBody", cfg="rpass3")]
+pub fn use_X() -> u32 {
+ let x: a::X = 22;
+ x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+#[rustc_clean(label="TypeckItemBody", cfg="rpass3")]
+pub fn use_Y() {
+ let x: a::Y = 'c';
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A variant of the first "spike" test that serves to test the
+// `rustc_partition_reused` and `rustc_partition_translated` tests.
+// Here we change and say that the `x` module will be reused (when in
+// fact it will not), and then indicate that the test itself
+// should-fail (because an error will be reported, and hence the
+// revision rpass2 will not compile, despite being named rpass).
+
+// revisions:rpass1 rpass2
+// should-fail
+
+#![feature(rustc_attrs)]
+
+#![rustc_partition_reused(module="spike_neg1", cfg="rpass2")]
+#![rustc_partition_reused(module="spike_neg1-x", cfg="rpass2")] // this is wrong!
+#![rustc_partition_reused(module="spike_neg1-y", cfg="rpass2")]
+
+mod x {
+ pub struct X {
+ x: u32, y: u32,
+ }
+
+ #[cfg(rpass1)]
+ fn make() -> X {
+ X { x: 22, y: 0 }
+ }
+
+ #[cfg(rpass2)]
+ fn make() -> X {
+ X { x: 11, y: 11 }
+ }
+
+ pub fn new() -> X {
+ make()
+ }
+
+ pub fn sum(x: &X) -> u32 {
+ x.x + x.y
+ }
+}
+
+mod y {
+ use x;
+
+ pub fn assert_sum() -> bool {
+ let x = x::new();
+ x::sum(&x) == 22
+ }
+}
+
+pub fn main() {
+ y::assert_sum();
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A variant of the first "spike" test that serves to test the
+// `rustc_partition_reused` and `rustc_partition_translated` tests.
+// Here we change and say that the `y` module will be translated (when
+// in fact it will not), and then indicate that the test itself
+// should-fail (because an error will be reported, and hence the
+// revision rpass2 will not compile, despite being named rpass).
+
+// revisions:rpass1 rpass2
+// should-fail
+
+#![feature(rustc_attrs)]
+
+#![rustc_partition_reused(module="spike_neg2", cfg="rpass2")]
+#![rustc_partition_translated(module="spike_neg2-x", cfg="rpass2")]
+#![rustc_partition_translated(module="spike_neg2-y", cfg="rpass2")] // this is wrong!
+
+mod x {
+ pub struct X {
+ x: u32, y: u32,
+ }
+
+ #[cfg(rpass1)]
+ fn make() -> X {
+ X { x: 22, y: 0 }
+ }
+
+ #[cfg(rpass2)]
+ fn make() -> X {
+ X { x: 11, y: 11 }
+ }
+
+ pub fn new() -> X {
+ make()
+ }
+
+ pub fn sum(x: &X) -> u32 {
+ x.x + x.y
+ }
+}
+
+mod y {
+ use x;
+
+ pub fn assert_sum() -> bool {
+ let x = x::new();
+ x::sum(&x) == 22
+ }
+}
+
+pub fn main() {
+ y::assert_sum();
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A first "spike" for incremental compilation: here, we change the
+// content of the `make` function, and we find that we can reuse the
+// `y` module entirely (but not the `x` module).
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+#![rustc_partition_reused(module="spike", cfg="rpass2")]
+#![rustc_partition_translated(module="spike-x", cfg="rpass2")]
+#![rustc_partition_reused(module="spike-y", cfg="rpass2")]
+
+mod x {
+ pub struct X {
+ x: u32, y: u32,
+ }
+
+ #[cfg(rpass1)]
+ fn make() -> X {
+ X { x: 22, y: 0 }
+ }
+
+ #[cfg(rpass2)]
+ fn make() -> X {
+ X { x: 11, y: 11 }
+ }
+
+ #[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+ #[rustc_clean(label="ItemSignature", cfg="rpass2")]
+ pub fn new() -> X {
+ make()
+ }
+
+ #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+ #[rustc_clean(label="ItemSignature", cfg="rpass2")]
+ pub fn sum(x: &X) -> u32 {
+ x.x + x.y
+ }
+}
+
+mod y {
+ use x;
+
+ #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+ pub fn assert_sum() -> bool {
+ let x = x::new();
+ x::sum(&x) == 22
+ }
+}
+
+pub fn main() {
+ y::assert_sum();
+}
+++ /dev/null
--include ../tools.mk
-
-# FIXME: ignore freebsd
-# This is a basic test of LLVM ExecutionEngine functionality using compiled
-# Rust code built using the `rustc` crate.
-
-ifeq ($(filter executionengine,$(LLVM_COMPONENTS)),executionengine)
-
-ifneq ($(shell uname),FreeBSD)
-all:
- $(RUSTC) test.rs
- $(call RUN,test $(RUSTC))
-else
-all:
-
-endif
-
-else
-all:
-
-endif
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private)]
-#![feature(libc)]
-
-extern crate libc;
-extern crate rustc;
-extern crate rustc_driver;
-extern crate rustc_lint;
-extern crate rustc_llvm as llvm;
-extern crate rustc_metadata;
-extern crate rustc_resolve;
-extern crate rustc_errors;
-extern crate rustc_errors as errors;
-#[macro_use] extern crate syntax;
-
-use std::ffi::{CStr, CString};
-use std::mem::transmute;
-use std::path::PathBuf;
-use std::rc::Rc;
-use std::thread::Builder;
-
-use rustc::dep_graph::DepGraph;
-use rustc::hir::map as ast_map;
-use rustc::middle::cstore::LinkagePreference;
-use rustc::ty;
-use rustc::session::config::{self, basic_options, build_configuration, Input, Options};
-use rustc::session::build_session;
-use rustc_driver::{driver, abort_on_err};
-use rustc_resolve::MakeGlobMap;
-use rustc_metadata::cstore::CStore;
-use libc::c_void;
-
-use rustc_errors::registry::Registry;
-
-fn main() {
- // Currently trips an assertion on i686-msvc, presumably because the support
- // in LLVM is a little young.
- if cfg!(target_env = "msvc") && cfg!(target_arch = "x86") {
- return
- }
-
- let program = r#"
- #[no_mangle]
- pub static TEST_STATIC: i32 = 42;
- "#;
-
- let program2 = r#"
- #[no_mangle]
- pub fn test_add(a: i32, b: i32) -> i32 { a + b }
- "#;
-
- let mut path = match std::env::args().nth(2) {
- Some(path) => PathBuf::from(&path),
- None => panic!("missing rustc path")
- };
-
- // Remove two segments from rustc path to get sysroot.
- path.pop();
- path.pop();
-
- let mut ee = ExecutionEngine::new(program, path);
-
- let test_static = match ee.get_global("TEST_STATIC") {
- Some(g) => g as *const i32,
- None => panic!("failed to get global")
- };
-
- assert_eq!(unsafe { *test_static }, 42);
-
- ee.add_module(program2);
-
- let test_add: fn(i32, i32) -> i32;
-
- test_add = match ee.get_function("test_add") {
- Some(f) => unsafe { transmute(f) },
- None => panic!("failed to get function")
- };
-
- assert_eq!(test_add(1, 2), 3);
-}
-
-struct ExecutionEngine {
- ee: llvm::ExecutionEngineRef,
- modules: Vec<llvm::ModuleRef>,
- sysroot: PathBuf,
-}
-
-impl ExecutionEngine {
- pub fn new(program: &str, sysroot: PathBuf) -> ExecutionEngine {
- let (llmod, deps) = compile_program(program, sysroot.clone())
- .expect("failed to compile program");
-
- let ee = unsafe { llvm::LLVMBuildExecutionEngine(llmod) };
-
- if ee.is_null() {
- panic!("Failed to create ExecutionEngine: {}", llvm_error());
- }
-
- let ee = ExecutionEngine{
- ee: ee,
- modules: vec![llmod],
- sysroot: sysroot,
- };
-
- ee.load_deps(&deps);
- ee
- }
-
- pub fn add_module(&mut self, program: &str) {
- let (llmod, deps) = compile_program(program, self.sysroot.clone())
- .expect("failed to compile program in add_module");
-
- unsafe { llvm::LLVMExecutionEngineAddModule(self.ee, llmod); }
-
- self.modules.push(llmod);
- self.load_deps(&deps);
- }
-
- /// Returns a raw pointer to the named function.
- pub fn get_function(&mut self, name: &str) -> Option<*const c_void> {
- let s = CString::new(name.as_bytes()).unwrap();
-
- for &m in &self.modules {
- let fv = unsafe { llvm::LLVMGetNamedFunction(m, s.as_ptr()) };
-
- if !fv.is_null() {
- let fp = unsafe { llvm::LLVMGetPointerToGlobal(self.ee, fv) };
-
- assert!(!fp.is_null());
- return Some(fp);
- }
- }
- None
- }
-
- /// Returns a raw pointer to the named global item.
- pub fn get_global(&mut self, name: &str) -> Option<*const c_void> {
- let s = CString::new(name.as_bytes()).unwrap();
-
- for &m in &self.modules {
- let gv = unsafe { llvm::LLVMGetNamedGlobal(m, s.as_ptr()) };
-
- if !gv.is_null() {
- let gp = unsafe { llvm::LLVMGetPointerToGlobal(self.ee, gv) };
-
- assert!(!gp.is_null());
- return Some(gp);
- }
- }
- None
- }
-
- /// Loads all dependencies of compiled code.
- /// Expects a series of paths to dynamic library files.
- fn load_deps(&self, deps: &[PathBuf]) {
- for path in deps {
- let s = match path.as_os_str().to_str() {
- Some(s) => s,
- None => panic!(
- "Could not convert crate path to UTF-8 string: {:?}", path)
- };
- let cs = CString::new(s).unwrap();
-
- let res = unsafe { llvm::LLVMRustLoadDynamicLibrary(cs.as_ptr()) };
-
- if res == 0 {
- panic!("Failed to load crate {:?}: {}",
- path.display(), llvm_error());
- }
- }
- }
-}
-
-impl Drop for ExecutionEngine {
- fn drop(&mut self) {
- unsafe { llvm::LLVMDisposeExecutionEngine(self.ee) };
- }
-}
-
-/// Returns last error from LLVM wrapper code.
-fn llvm_error() -> String {
- String::from_utf8_lossy(
- unsafe { CStr::from_ptr(llvm::LLVMRustGetLastError()).to_bytes() })
- .into_owned()
-}
-
-fn build_exec_options(sysroot: PathBuf) -> Options {
- let mut opts = basic_options();
-
- // librustc derives sysroot from the executable name.
- // Since we are not rustc, we must specify it.
- opts.maybe_sysroot = Some(sysroot);
-
- // Prefer faster build time
- opts.optimize = config::OptLevel::No;
-
- // Don't require a `main` function
- opts.crate_types = vec![config::CrateTypeDylib];
-
- opts
-}
-
-/// Compiles input up to phase 4, translation to LLVM.
-///
-/// Returns the LLVM `ModuleRef` and a series of paths to dynamic libraries
-/// for crates used in the given input.
-fn compile_program(input: &str, sysroot: PathBuf)
- -> Option<(llvm::ModuleRef, Vec<PathBuf>)> {
- let input = Input::Str {
- name: driver::anon_src(),
- input: input.to_string(),
- };
- let thread = Builder::new().name("compile_program".to_string());
-
- let handle = thread.spawn(move || {
- let opts = build_exec_options(sysroot);
- let dep_graph = DepGraph::new(opts.build_dep_graph());
- let cstore = Rc::new(CStore::new(&dep_graph));
- let sess = build_session(opts,
- &dep_graph,
- None,
- Registry::new(&rustc::DIAGNOSTICS),
- cstore.clone());
- rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
-
- let cfg = build_configuration(&sess);
-
- let id = "input".to_string();
-
- let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
-
- let driver::ExpansionResult { defs, analysis, resolutions, mut hir_forest, .. } = {
- driver::phase_2_configure_and_expand(
- &sess, &cstore, krate, &id, None, MakeGlobMap::No, |_| Ok(()),
- ).expect("phase_2 returned `None`")
- };
-
- let arenas = ty::CtxtArenas::new();
- let ast_map = ast_map::map_crate(&mut hir_forest, defs);
-
- abort_on_err(driver::phase_3_run_analysis_passes(
- &sess, ast_map, analysis, resolutions, &arenas, &id,
- |tcx, mir_map, analysis, _| {
-
- let trans = driver::phase_4_translate_to_llvm(tcx, mir_map.unwrap(), analysis);
-
- let crates = tcx.sess.cstore.used_crates(LinkagePreference::RequireDynamic);
-
- // Collect crates used in the session.
- // Reverse order finds dependencies first.
- let deps = crates.into_iter().rev()
- .filter_map(|(_, p)| p).collect();
-
- assert_eq!(trans.modules.len(), 1);
- let llmod = trans.modules[0].llmod;
-
- // Workaround because raw pointers do not impl Send
- let modp = llmod as usize;
-
- (modp, deps)
- }), &sess)
- }).unwrap();
-
- match handle.join() {
- Ok((llmod, deps)) => Some((llmod as llvm::ModuleRef, deps)),
- Err(_) => None
- }
-}
extern crate rustc;
extern crate rustc_driver;
extern crate rustc_llvm;
+extern crate rustc_trans;
#[macro_use] extern crate syntax;
extern crate getopts;
use rustc_driver::{CompilerCalls, Compilation};
use rustc_driver::driver::CompileController;
+use rustc_trans::ModuleSource;
use rustc::session::Session;
use syntax::codemap::FileLoader;
use std::io;
state.session.abort_if_errors();
let trans = state.trans.unwrap();
assert_eq!(trans.modules.len(), 1);
- let rs_llmod = trans.modules[0].llmod;
+ let rs_llmod = match trans.modules[0].source {
+ ModuleSource::Preexisting(_) => unimplemented!(),
+ ModuleSource::Translated(llvm) => llvm.llmod,
+ };
unsafe { rustc_llvm::LLVMDumpModule(rs_llmod) };
});
cc
// making method calls, but only if there aren't any matches without
// it.
-
-#![feature(unboxed_closures)]
-
trait iterable<A> {
fn iterate<F>(&self, blk: F) -> bool where F: FnMut(&A) -> bool;
}
// for `ByRef`. The right answer was to consider the result ambiguous
// until more type information was available.
-#![feature(lang_items, unboxed_closures)]
+#![feature(lang_items)]
#![no_implicit_prelude]
use std::marker::Sized;
// for `ByRef`. The right answer was to consider the result ambiguous
// until more type information was available.
-#![feature(lang_items, unboxed_closures)]
+#![feature(lang_items)]
#![no_implicit_prelude]
use std::marker::Sized;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
#![crate_type = "rlib"]
pub fn inner<F>(f: F) -> F {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::ops::Add;
#[inline]
}
fn expected(fn_name: &str) -> String {
- // FIXME(#32481)
- //
- // On windows, we read the function name from debuginfo using some
- // system APIs. For whatever reason, these APIs seem to use the
- // "name" field, which is only the "relative" name, not the full
- // name with namespace info, so we just see `foo` and not
- // `backtrace::foo` as we see on linux (which uses the linkage
- // name).
- if cfg!(windows) && cfg!(target_env = "msvc") {
- format!(" - {}", fn_name)
- } else {
- format!(" - backtrace::{}", fn_name)
- }
+ format!(" - backtrace::{}", fn_name)
}
fn runtest(me: &str) {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
fn call_f<F:FnMut()>(mut f: F) {
#![allow(unknown_features)]
#![feature(box_syntax)]
-#![feature(unboxed_closures)]
pub fn main() {
let bar: Box<_> = box 3;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(unboxed_closures)]
-
fn each<'a,T,F:FnMut(&'a T)>(x: &'a [T], mut f: F) {
for val in x {
f(val)
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
use std::sync::mpsc::channel;
fn foo<F:FnOnce()+Send>(blk: F) {
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
-#![feature(unboxed_closures)]
-
fn call_it<F>(f: F)
where F : FnOnce(String) -> String
{
// except according to those terms.
#![allow(unknown_features)]
-#![feature(unboxed_closures, std_misc)]
+#![feature(std_misc)]
/**
A somewhat reduced test case to expose some Valgrind issues.
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
#![allow(unused_variables)]
#![allow(dead_code)]
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
// Test that `F : Fn(isize) -> isize + Send` is interpreted as two
// distinct bounds on `F`.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
-#![feature(unboxed_closures)]
// Test that `Fn(isize) -> isize + 'static` parses as `(Fn(isize) -> isize) +
// 'static` and not `Fn(isize) -> (isize + 'static)`. The latter would
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(unboxed_closures)]
-
// A basic test of using a higher-ranked trait bound.
trait FnLike<A,R> {
// Test HRTB used with the `Fn` trait.
-#![feature(unboxed_closures)]
-
fn foo<F:Fn(&isize)>(f: F) {
let x = 22;
f(&x);
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
fn f<F:FnOnce()>(p: F) {
p();
}
unsafe {
asm!("mov ($1), $0"
: $output_constraint (*wrap(&mut x, "out", &mut history))
- : "r"(&wrap(y, "in", &mut history)));
+ : "r"(&wrap(y, "in", &mut history))
+ :: "volatile");
}
assert_eq!((x,y), (1,1));
let b: &[_] = &["out", "in"];
// ignore-emscripten no threads support
-#![feature(unboxed_closures)]
-
use std::thread;
use std::mem;
// ignore-pretty
#![allow(unknown_features)]
-#![feature(unboxed_closures)]
struct Parser<'a, I, O> {
parse: Box<FnMut(I) -> Result<O, String> + 'a>
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(box_patterns)]
-#![feature(unboxed_closures)]
use std::ops::{Deref, DerefMut};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::marker::PhantomData;
fn main() {
// once closure as an optimization by trans. This used to hit an
// incorrect assert.
-
-#![feature(unboxed_closures)]
-
fn main() {
let x = 2u8;
let y = 3u8;
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
trait Tr {
fn foo(&self);
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
// aux-build:issue-18711.rs
extern crate issue_18711 as issue;
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
fn foo<T, F: FnOnce(T) -> T>(f: F) {}
fn id<'a>(input: &'a u8) -> &'a u8 { input }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::marker::PhantomData;
#[derive(Debug)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// compile-flags: -Z orbit=off
+// (blows the stack with MIR trans and no optimizations)
+
// Tests that the `vec!` macro does not overflow the stack when it is
// given data larger than the stack.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs, unboxed_closures, fn_traits)]
+#![feature(rustc_attrs, fn_traits)]
#[rustc_mir]
fn test1(a: isize, b: (i32, i32), c: &[i32]) -> (isize, (i32, i32), &[i32]) {
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test case tests whether we can handle code bases that contain a high
+// number of closures, something that needs special handling in the MinGW
+// toolchain.
+// See https://github.com/rust-lang/rust/issues/34793 for more information.
+
+// Expand something exponentially
+macro_rules! go_bacterial {
+ ($mac:ident) => ($mac!());
+ ($mac:ident 1 $($t:tt)*) => (
+ go_bacterial!($mac $($t)*);
+ go_bacterial!($mac $($t)*);
+ )
+}
+
+macro_rules! mk_closure {
+ () => ({
+ let c = |a: u32| a + 4;
+ let _ = c(2);
+ })
+}
+
+macro_rules! mk_fn {
+ () => {
+ {
+ fn function() {
+ // Make 16 closures
+ go_bacterial!(mk_closure 1 1 1 1);
+ }
+ let _ = function();
+ }
+ }
+}
+
+fn main() {
+ // Make 2^12 functions, each containing 16 closures,
+ // resulting in 2^16 closures overall.
+ go_bacterial!(mk_fn 1 1 1 1 1 1 1 1 1 1 1 1);
+}
use std::time::Duration;
use std::thread::{self, Builder};
+const TARGET_CNT: usize = 200;
+
fn main() {
// This test has a chance to time out, try to not let it time out
thread::spawn(move|| -> () {
});
let (tx, rx) = channel();
+
let mut spawned_cnt = 0;
- for _ in 0..1000 {
+ for _ in 0..TARGET_CNT {
let tx = tx.clone();
let res = Builder::new().stack_size(64 * 1024).spawn(move|| {
match TcpStream::connect(addr) {
for _ in 0..spawned_cnt {
rx.recv().unwrap();
}
- assert_eq!(spawned_cnt, 1000);
+ assert_eq!(spawned_cnt, TARGET_CNT);
process::exit(0);
}
#![allow(unknown_features)]
#![feature(box_syntax, std_misc)]
-#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
// Also acts as a regression test for an ICE (issue #19791)
-#![feature(unboxed_closures, core)]
+#![feature(core)]
use std::any::{Any, TypeId};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(lang_items, unboxed_closures)]
+#![feature(lang_items)]
fn a<F:Fn(isize, isize) -> isize>(f: F) -> isize {
f(1, 2)
// Test that you can supply `&F` where `F: FnMut()`.
-
-#![feature(lang_items, unboxed_closures)]
+#![feature(lang_items)]
fn a<F:FnMut() -> i32>(mut f: F) -> i32 {
f()
// Test that you can supply `&F` where `F: Fn()`.
-
-#![feature(lang_items, unboxed_closures)]
+#![feature(lang_items)]
fn a<F:Fn() -> i32>(f: F) -> i32 {
f()
#![allow(unknown_features)]
#![feature(box_syntax)]
-#![feature(unboxed_closures)]
use std::ops::FnMut;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(unboxed_closures)]
-
// Test by-ref capture of environment in unboxed closure types
fn call_fn<F: Fn()>(f: F) {
// Test that the call operator autoderefs when calling a bounded type parameter.
-
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
fn call_with_2(x: &fn(isize) -> isize) -> isize
// Test that the call operator autoderefs when calling a bounded type parameter.
-
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
fn call_with_2<F>(x: &mut F) -> isize
// Test that the call operator autoderefs when calling to an object type.
#![allow(unknown_features)]
-#![feature(unboxed_closures)]
use std::ops::FnMut;
// except according to those terms.
#![allow(unknown_features)]
-#![feature(unboxed_closures)]
use std::ops::FnMut;
// Test that we mutate a counter on the stack only when we expect to.
-
fn call<F>(f: F) where F : FnOnce() {
f();
}
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
fn main() {
let mut unboxed = || {};
unboxed();
// A battery of tests to ensure destructors of unboxed closure environments
// run at the right times.
-
-#![feature(unboxed_closures)]
-
static mut DROP_COUNT: usize = 0;
fn drop_count() -> usize {
// Checks that extern fn pointers implement the full range of Fn traits.
-
-#![feature(unboxed_closures)]
-#![feature(unboxed_closures)]
-
use std::ops::{Fn,FnMut,FnOnce};
fn square(x: isize) -> isize { x * x }
// Checks that the Fn trait hierarchy rules permit
// any Fn trait to be used where Fn is implemented.
-
#![feature(unboxed_closures, fn_traits)]
use std::ops::{Fn,FnMut,FnOnce};
// Checks that the Fn trait hierarchy rules permit
// FnMut or FnOnce to be used where FnMut is implemented.
-
#![feature(unboxed_closures, fn_traits)]
struct S;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
fn call_it<F:FnMut(i32,i32)->i32>(y: i32, mut f: F) -> i32 {
// Test that we are able to infer a suitable kind for this closure
// that is just called (`FnMut`).
-
fn main() {
let mut counter = 0;
// Test that we are able to infer a suitable kind for this `move`
// closure that is just called (`FnMut`).
-
fn main() {
let mut counter = 0;
// Test that we are able to infer a suitable kind for this closure
// that is just called (`FnMut`).
-
fn main() {
let mut counter = 0;
// Test that we can infer the "kind" of an unboxed closure based on
// the expected type.
-
-#![feature(unboxed_closures)]
-
// Test by-ref capture of environment in unboxed closure types
fn call_fn<F: Fn()>(f: F) {
// Test that the type variable in the type(`Vec<_>`) of a closed over
// variable does not interfere with type inference.
-
fn f<F: FnMut()>(mut f: F) {
f();
}
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
#![deny(unused_mut)]
// Test that mutating a mutable upvar in a capture-by-value unboxed
// Test that in a by-ref once closure we move some variables even as
// we capture others by mutable reference.
-
fn call<F>(f: F) where F : FnOnce() {
f();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(unboxed_closures)]
-
use std::ops::FnMut;
pub fn main() {
// Ensures that single-word environments work right in unboxed closures.
// These take a different path in codegen.
-
-#![feature(unboxed_closures)]
-
fn a<F:Fn(isize, isize) -> isize>(f: F) -> isize {
f(1, 2)
}
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
fn main() {
let onetime = |x| x;
onetime(0);
// Test unboxed closure sugar used in object types.
-
#![allow(dead_code)]
-#![feature(unboxed_closures)]
struct Foo<T,U> {
t: T, u: U
//
// compile-flags: -g
-
-#![feature(unboxed_closures)]
-
use std::ptr;
pub fn replace_map<'a, T, F>(src: &mut T, prod: F) where F: FnOnce(T) -> T {
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
fn main() {
let mut zero = || {};
let () = zero();
// pretty-expanded FIXME #23616
-#![feature(unboxed_closures)]
-
struct Bencher;
// ICE
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected u64, found usize
$DIR/issue-26480.rs:38:5: 38:19 note: in this expansion of write! (defined in $DIR/issue-26480.rs)
-error: non-scalar cast: `_` as `()`
+error: non-scalar cast: `{integer}` as `()`
--> $DIR/issue-26480.rs:33:19
|
33 | ($x:expr) => ($x as ())