[[package]]
name = "racer"
-version = "2.0.13"
+version = "2.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_syntax 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "racer 2.0.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "racer 2.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-analysis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-rustc 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-rustc 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-vfs 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustfmt-nightly 0.7.0",
"serde_derive 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
[[package]]
name = "rls-rustc"
-version = "0.2.2"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "racer 2.0.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "racer 2.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "toml"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "toml"
version = "0.4.6"
"checksum quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "07589615d719a60c8dd8a4622e7946465dfef20d1a428f969e3443e7386d5f45"
"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
"checksum quote 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0ff51282f28dc1b53fd154298feaa2e77c5ea0dba68e1fd8b03b72fbe13d2a"
-"checksum racer 2.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "40d44bc30fc8d403b665286b2c9a83466ddbf69297668fb02b785c3e58eb8e0d"
+"checksum racer 2.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "e713729f45f12df5c5e182d39506766f76c09133fb661d3622e0ddf8078911c2"
"checksum radix_trie 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "03d0d770481e8af620ca61d3d304bf014f965d7f78e923dc58545e6a545070a9"
"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
"checksum rls-analysis 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "da9794cd1f80f2cb888c00641a32f9855d0226c954ee31cef145784914c7142e"
"checksum rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e4a9cc2545ccb7e05b355bfe047b8039a6ec12270d5f3c996b766b340a50f7d2"
"checksum rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd20763e1c60ae8945384c8a8fa4ac44f8afa7b0a817511f5e8927e5d24f988"
-"checksum rls-rustc 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "885f66b92757420572cbb02e033d4a9558c7413ca9b7ac206f28fd58ffdb44ea"
+"checksum rls-rustc 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ed5342b2bbbe8663c04600af506c8902b6b4d3e627b006eb1bd65aa14805f4d"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
"checksum rls-vfs 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "be231e1e559c315bc60ced5ad2cc2d7a9c208ed7d4e2c126500149836fda19bb"
"checksum rustc-ap-rustc_cratesio_shim 128.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7374a2b466e6e3ce489e045302e304849355faf7fd033d4d95e6e86e48c313b4"
"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
"checksum time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "a15375f1df02096fb3317256ce2cee6a1f42fc84ea5ad5fc8c421cfe40c73098"
-"checksum toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "736b60249cb25337bc196faa43ee12c705e426f3d55c214d73a4e7be06f92cb4"
"checksum toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a0263c6c02c4db6c8f7681f9fd35e90de799ebd4cfdeab77a38f4ff6b3d8c0d9"
"checksum toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6854664bfc6df0360c695480836ee90e2d0c965f06db291d10be9344792d43e8"
"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
}
if verbose > 1 {
- eprintln!("rustc command: {:?}", cmd);
+ eprintln!(
+ "rustc command: {:?}={:?} {:?}",
+ bootstrap::util::dylib_path_var(),
+ env::join_paths(&dylib_path).unwrap(),
+ cmd,
+ );
eprintln!("sysroot: {:?}", sysroot);
eprintln!("libdir: {:?}", libdir);
}
rustc_args: vec![],
fail_fast: true,
doc_tests: DocTests::No,
+ bless: false,
};
let build = Build::new(config);
},
Test {
paths: Vec<PathBuf>,
+ /// Whether to automatically update stderr/stdout files
+ bless: bool,
test_args: Vec<String>,
rustc_args: Vec<String>,
fail_fast: bool,
);
opts.optflag("", "no-doc", "do not run doc tests");
opts.optflag("", "doc", "only run doc tests");
+ opts.optflag("", "bless", "update all stderr/stdout files of failing ui tests");
},
"bench" => { opts.optmulti("", "test-args", "extra arguments", "ARGS"); },
"clean" => { opts.optflag("", "all", "clean all build artifacts"); },
./x.py test src/test/run-pass
./x.py test src/libstd --test-args hash_map
./x.py test src/libstd --stage 0
+ ./x.py test src/test/ui --bless
If no arguments are passed then the complete artifacts for that stage are
compiled and tested.
"test" => {
Subcommand::Test {
paths,
+ bless: matches.opt_present("bless"),
test_args: matches.opt_strs("test-args"),
rustc_args: matches.opt_strs("rustc-args"),
fail_fast: !matches.opt_present("no-fail-fast"),
_ => DocTests::Yes,
}
}
+
+    /// Returns `true` when `--bless` was passed to `x.py test`, i.e. the
+    /// stderr/stdout expectation files of failing ui tests should be
+    /// updated in place. Always `false` for non-`Test` subcommands.
+    pub fn bless(&self) -> bool {
+        match *self {
+            Subcommand::Test { bless, .. } => bless,
+            _ => false,
+        }
+    }
}
fn split(s: Vec<String>) -> Vec<String> {
Bench,
}
+/// Maps the builder's `Kind` onto the matching `TestKind`.
+///
+/// Only `Kind::Test` and `Kind::Bench` are valid here; any other kind is a
+/// caller bug and panics, mirroring the former inline checks this replaces.
+impl From<Kind> for TestKind {
+    fn from(kind: Kind) -> Self {
+        match kind {
+            Kind::Test => TestKind::Test,
+            Kind::Bench => TestKind::Bench,
+            _ => panic!("unexpected kind in crate: {:?}", kind)
+        }
+    }
+}
+
impl TestKind {
// Return the cargo subcommand for this test kind
fn subcommand(self) -> &'static str {
cmd.arg("--host").arg(&*compiler.host);
cmd.arg("--llvm-filecheck").arg(builder.llvm_filecheck(builder.config.build));
+ if builder.config.cmd.bless() {
+ cmd.arg("--bless");
+ }
+
if let Some(ref nodejs) = builder.config.nodejs {
cmd.arg("--nodejs").arg(nodejs);
}
for krate in builder.in_tree_crates("rustc-main") {
if run.path.ends_with(&krate.path) {
- let test_kind = if builder.kind == Kind::Test {
- TestKind::Test
- } else if builder.kind == Kind::Bench {
- TestKind::Bench
- } else {
- panic!("unexpected builder.kind in crate: {:?}", builder.kind);
- };
+ let test_kind = builder.kind.into();
builder.ensure(CrateLibrustc {
compiler,
let builder = run.builder;
let compiler = builder.compiler(builder.top_stage, run.host);
- let test_kind = if builder.kind == Kind::Test {
- TestKind::Test
- } else if builder.kind == Kind::Bench {
- TestKind::Bench
- } else {
- panic!("unexpected builder.kind in crate: {:?}", builder.kind);
- };
+ let test_kind = builder.kind.into();
builder.ensure(CrateNotDefault {
compiler,
let compiler = builder.compiler(builder.top_stage, run.host);
let make = |mode: Mode, krate: &CargoCrate| {
- let test_kind = if builder.kind == Kind::Test {
- TestKind::Test
- } else if builder.kind == Kind::Bench {
- TestKind::Bench
- } else {
- panic!("unexpected builder.kind in crate: {:?}", builder.kind);
- };
+ let test_kind = builder.kind.into();
builder.ensure(Crate {
compiler,
fn make_run(run: RunConfig) {
let builder = run.builder;
- let test_kind = if builder.kind == Kind::Test {
- TestKind::Test
- } else if builder.kind == Kind::Bench {
- TestKind::Bench
- } else {
- panic!("unexpected builder.kind in crate: {:?}", builder.kind);
- };
+ let test_kind = builder.kind.into();
builder.ensure(CrateRustdoc {
host: run.host,
### Identifiers
-The `ident` production is any nonempty Unicode[^non_ascii_idents] string of
+The `ident` production is any nonempty Unicode string of
the following form:
-[^non_ascii_idents]: Non-ASCII characters in identifiers are currently feature
- gated. This is expected to improve soon.
+- The first character is in one of the following ranges `U+0041` to `U+005A`
+("A" to "Z"), `U+0061` to `U+007A` ("a" to "z"), or `U+005F` ("\_").
+- The remaining characters are in the range `U+0030` to `U+0039` ("0" to "9"),
+or any of the prior valid initial characters.
-- The first character has property `XID_start`
-- The remaining characters have property `XID_continue`
-
-that does _not_ occur in the set of [keywords](#keywords).
-
-> **Note**: `XID_start` and `XID_continue` as character properties cover the
-> character ranges used to form the more familiar C and Java language-family
-> identifiers.
+as long as the identifier does _not_ occur in the set of [keywords](#keywords).
### Delimiter-restricted productions
Some productions are defined by exclusion of particular Unicode characters:
- `non_null` is any single Unicode character aside from `U+0000` (null)
-- `non_eol` is `non_null` restricted to exclude `U+000A` (`'\n'`)
-- `non_single_quote` is `non_null` restricted to exclude `U+0027` (`'`)
-- `non_double_quote` is `non_null` restricted to exclude `U+0022` (`"`)
+- `non_eol` is any single Unicode character aside from `U+000A` (`'\n'`)
+- `non_single_quote` is any single Unicode character aside from `U+0027` (`'`)
+- `non_double_quote` is any single Unicode character aside from `U+0022` (`"`)
## Comments
% The Rust Tutorial
-This tutorial has been deprecated in favor of [the Book](book/index.html). Go check that out instead!
+This tutorial has been deprecated in favor of [the Book](book/index.html), which is available free online and in dead tree form. Go check that out instead!
/// value is not necessarily valid to be used to actually access memory.
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
- /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
- /// and destination must *not* overlap.
+    /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+    /// and destination must *not* overlap.
///
- /// For regions of memory which might overlap, use [`copy`] instead.
- ///
- /// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`].
- ///
- /// [`copy`]: ./fn.copy.html
- /// [`memcpy`]: https://www.gnu.org/software/libc/manual/html_node/Copying-Strings-and-Arrays.html#index-memcpy
+ /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
- /// Behavior is undefined if any of the following conditions are violated:
- ///
- /// * The region of memory which begins at `src` and has a length of
- /// `count * size_of::<T>()` bytes must be *both* valid and initialized.
- ///
- /// * The region of memory which begins at `dst` and has a length of
- /// `count * size_of::<T>()` bytes must be valid (but may or may not be
- /// initialized).
- ///
- /// * The two regions of memory must *not* overlap.
- ///
- /// * `src` must be properly aligned.
- ///
- /// * `dst` must be properly aligned.
- ///
- /// Additionally, if `T` is not [`Copy`], only the region at `src` *or* the
- /// region at `dst` can be used or dropped after calling
- /// `copy_nonoverlapping`. `copy_nonoverlapping` creates bitwise copies of
- /// `T`, regardless of whether `T: Copy`, which can result in undefined
- /// behavior if both copies are used.
- ///
- /// [`Copy`]: ../marker/trait.Copy.html
+ /// Beyond requiring that the program must be allowed to access both regions
+ /// of memory, it is Undefined Behavior for source and destination to
+ /// overlap. Care must also be taken with the ownership of `src` and
+ /// `dst`. This method semantically moves the values of `src` into `dst`.
+ /// However it does not drop the contents of `dst`, or prevent the contents
+ /// of `src` from being dropped or used.
///
/// # Examples
///
- /// Manually implement [`Vec::append`]:
+ /// A safe swap function:
///
/// ```
+ /// use std::mem;
/// use std::ptr;
///
- /// /// Moves all the elements of `src` into `dst`, leaving `src` empty.
- /// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) {
- /// let src_len = src.len();
- /// let dst_len = dst.len();
- ///
- /// // Ensure that `dst` has enough capacity to hold all of `src`.
- /// dst.reserve(src_len);
- ///
+ /// # #[allow(dead_code)]
+ /// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
- /// // The call to offset is always safe because `Vec` will never
- /// // allocate more than `isize::MAX` bytes.
- /// let dst = dst.as_mut_ptr().offset(dst_len as isize);
- /// let src = src.as_ptr();
- ///
- /// // The two regions cannot overlap becuase mutable references do
- /// // not alias, and two different vectors cannot own the same
- /// // memory.
- /// ptr::copy_nonoverlapping(src, dst, src_len);
- /// }
+ /// // Give ourselves some scratch space to work with
+ /// let mut t: T = mem::uninitialized();
///
- /// unsafe {
- /// // Truncate `src` without dropping its contents.
- /// src.set_len(0);
+ /// // Perform the swap, `&mut` pointers never alias
+ /// ptr::copy_nonoverlapping(x, &mut t, 1);
+ /// ptr::copy_nonoverlapping(y, x, 1);
+ /// ptr::copy_nonoverlapping(&t, y, 1);
///
- /// // Notify `dst` that it now holds the contents of `src`.
- /// dst.set_len(dst_len + src_len);
+ /// // y and t now point to the same thing, but we need to completely forget `t`
+ /// // because it's no longer relevant.
+ /// mem::forget(t);
/// }
/// }
- ///
- /// let mut a = vec!['r'];
- /// let mut b = vec!['u', 's', 't'];
- ///
- /// append(&mut a, &mut b);
- ///
- /// assert_eq!(a, &['r', 'u', 's', 't']);
- /// assert!(b.is_empty());
/// ```
- ///
- /// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+    /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
/// and destination may overlap.
///
- /// If the source and destination will *never* overlap,
- /// [`copy_nonoverlapping`] can be used instead.
- ///
- /// `copy` is semantically equivalent to C's [`memmove`].
- ///
- /// [`copy_nonoverlapping`]: ./fn.copy_nonoverlapping.html
- /// [`memmove`]: https://www.gnu.org/software/libc/manual/html_node/Copying-Strings-and-Arrays.html#index-memmove
+ /// `copy` is semantically equivalent to C's `memmove`.
///
/// # Safety
///
- /// Behavior is undefined if any of the following conditions are violated:
- ///
- /// * The region of memory which begins at `src` and has a length of
- /// `count * size_of::<T>()` bytes must be *both* valid and initialized.
- ///
- /// * The region of memory which begins at `dst` and has a length of
- /// `count * size_of::<T>()` bytes must be valid (but may or may not be
- /// initialized).
- ///
- /// * `src` must be properly aligned.
- ///
- /// * `dst` must be properly aligned.
- ///
- /// Additionally, if `T` is not [`Copy`], only the region at `src` *or* the
- /// region at `dst` can be used or dropped after calling `copy`. `copy`
- /// creates bitwise copies of `T`, regardless of whether `T: Copy`, which
- /// can result in undefined behavior if both copies are used.
- ///
- /// [`Copy`]: ../marker/trait.Copy.html
+ /// Care must be taken with the ownership of `src` and `dst`.
+ /// This method semantically moves the values of `src` into `dst`.
+ /// However it does not drop the contents of `dst`, or prevent the contents of `src`
+ /// from being dropped or used.
///
/// # Examples
///
/// dst
/// }
/// ```
+ ///
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
- /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
- /// `val`.
- ///
- /// `write_bytes` is semantically equivalent to C's [`memset`].
- ///
- /// [`memset`]: https://www.gnu.org/software/libc/manual/html_node/Copying-Strings-and-Arrays.html#index-memset
- ///
- /// # Safety
- ///
- /// Behavior is undefined if any of the following conditions are violated:
- ///
- /// * The region of memory which begins at `dst` and has a length of
- /// `count` bytes must be valid.
- ///
- /// * `dst` must be properly aligned.
- ///
- /// Additionally, the caller must ensure that writing `count` bytes to the
- /// given region of memory results in a valid value of `T`. Creating an
- /// invalid value of `T` can result in undefined behavior. An example is
- /// provided below.
+ /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `dst` to `val`.
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::ptr;
///
/// }
/// assert_eq!(vec, [b'a', b'a', 0, 0]);
/// ```
- ///
- /// Creating an invalid value:
- ///
- /// ```no_run
- /// use std::{mem, ptr};
- ///
- /// let mut v = Box::new(0i32);
- ///
- /// unsafe {
- /// // Leaks the previously held value by overwriting the `Box<T>` with
- /// // a null pointer.
- /// ptr::write_bytes(&mut v, 0, mem::size_of::<Box<i32>>());
- /// }
- ///
- /// // At this point, using or dropping `v` results in undefined behavior.
- /// // v = Box::new(0i32); // ERROR
- /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
}
}
+    /// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns [`None`].
+ ///
+ /// [`Some`]: #variant.Some
+ /// [`None`]: #variant.None
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_xor)]
+ ///
+ /// let x = Some(2);
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x = Some(2);
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), None);
+ ///
+ /// let x: Option<u32> = None;
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_xor", issue = "50512")]
+ pub fn xor(self, optb: Option<T>) -> Option<T> {
+ match (self, optb) {
+ (Some(a), None) => Some(a),
+ (None, Some(b)) => Some(b),
+ _ => None,
+ }
+ }
+
/////////////////////////////////////////////////////////////////////////
// Entry-like operations to insert if None and return a reference
/////////////////////////////////////////////////////////////////////////
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
-//! Manually manage memory through raw pointers.
+//! Raw, unsafe pointers, `*const T`, and `*mut T`.
//!
//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
/// Executes the destructor (if any) of the pointed-to value.
///
-/// This is semantically equivalent to calling [`ptr::read`] and discarding
-/// the result, but has the following advantages:
+/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and
/// dropped normally.
///
-/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
+/// * It is friendlier to the optimizer to do this over `ptr::read` when
/// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// copy.
///
-/// [`ptr::read`]: ../ptr/fn.read.html
-///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `to_drop` must point to valid memory.
-///
-/// * `to_drop` must be properly aligned.
-///
-/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
-/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
-/// foo` counts as a use because it will cause the the value to be dropped
-/// again. [`write`] can be used to overwrite data without causing it to be
-/// dropped.
-///
-/// [`Copy`]: ../marker/trait.Copy.html
-/// [`write`]: ../ptr/fn.write.html
-///
-/// # Examples
-///
-/// Manually remove the last item from a vector:
-///
-/// ```
-/// use std::ptr;
-/// use std::rc::Rc;
-///
-/// let last = Rc::new(1);
-/// let weak = Rc::downgrade(&last);
-///
-/// let mut v = vec![Rc::new(0), last];
-///
-/// unsafe {
-/// // Without a call `drop_in_place`, the last item would never be dropped,
-/// // and the memory it manages would be leaked.
-/// ptr::drop_in_place(&mut v[1]);
-/// v.set_len(1);
-/// }
-///
-/// assert_eq!(v, &[0.into()]);
-///
-/// // Ensure that the last item was dropped.
-/// assert!(weak.upgrade().is_none());
-/// ```
+/// This has all the same safety problems as `ptr::read` with respect to
+/// invalid pointers, types, and double drops.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either.
///
-/// But for the following two exceptions, this function is semantically
-/// equivalent to [`mem::swap`]:
-///
-/// * It operates on raw pointers instead of references. When references are
-/// available, [`mem::swap`] should be preferred.
-///
-/// * The two pointed-to values may overlap. If the values do overlap, then the
-/// overlapping region of memory from `x` will be used. This is demonstrated
-/// in the examples below.
-///
-/// [`mem::swap`]: ../mem/fn.swap.html
+/// The values pointed at by `x` and `y` may overlap, unlike `mem::swap` which
+/// is otherwise equivalent. If the values do overlap, then the overlapping
+/// region of memory from `x` will be used. This is demonstrated in the
+/// examples section below.
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
+/// This function copies the memory through the raw pointers passed to it
+/// as arguments.
///
-/// * `x` and `y` must point to valid, initialized memory.
-///
-/// * `x` and `y` must be properly aligned.
+/// Ensure that these pointers are valid before calling `swap`.
///
/// # Examples
///
}
}
-/// Replaces the value at `dest` with `src`, returning the old value, without
-/// dropping either.
-///
-/// This function is semantically equivalent to [`mem::replace`] except that it
-/// operates on raw pointers instead of references. When references are
-/// available, [`mem::replace`] should be preferred.
-///
-/// [`mem::replace`]: ../mem/fn.replace.html
+/// Replaces the value at `dest` with `src`, returning the old
+/// value, without dropping either.
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `dest` must point to valid, initialized memory.
-///
-/// * `dest` must be properly aligned.
-///
-/// # Examples
-///
-/// ```
-/// use std::ptr;
-///
-/// let mut rust = vec!['b', 'u', 's', 't'];
-///
-/// // `mem::replace` would have the same effect without requiring the unsafe
-/// // block.
-/// let b = unsafe {
-/// ptr::replace(&mut rust[0], 'r')
-/// };
-///
-/// assert_eq!(b, 'b');
-/// assert_eq!(rust, &['r', 'u', 's', 't']);
-/// ```
+/// This is only unsafe because it accepts a raw pointer.
+/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
+/// Beyond accepting a raw pointer, this is unsafe because it semantically
+/// moves the value out of `src` without preventing further usage of `src`.
+/// If `T` is not `Copy`, then care must be taken to ensure that the value at
+/// `src` is not used before the data is overwritten again (e.g. with `write`,
+/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
+/// because it will attempt to drop the value previously at `*src`.
///
-/// * `src` must point to valid, initialized memory.
-///
-/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
-/// case.
-///
-/// Additionally, if `T` is not [`Copy`], only the returned value *or* the
-/// pointed-to value can be used or dropped after calling `read`. `read` creates
-/// a bitwise copy of `T`, regardless of whether `T: Copy`, which can result
-/// in undefined behavior if both copies are used. Note that `*src = foo` counts
-/// as a use because it will attempt to drop the value previously at `*src`.
-/// [`write`] can be used to overwrite data without causing it to be dropped.
-///
-/// [`Copy`]: ../marker/trait.Copy.html
-/// [`read_unaligned`]: ./fn.read_unaligned.html
-/// [`write`]: ./fn.write.html
+/// The pointer must be aligned; use `read_unaligned` if that is not the case.
///
/// # Examples
///
/// assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
-///
-/// Manually implement [`mem::swap`]:
-///
-/// ```
-/// use std::ptr;
-///
-/// fn swap<T>(a: &mut T, b: &mut T) {
-/// unsafe {
-/// // Create a bitwise copy of the value at `a` in `tmp`.
-/// let tmp = ptr::read(a);
-///
-/// // Exiting at this point (either by explicitly returning or by
-/// // calling a function which panics) would cause the value in `tmp` to
-/// // be dropped while the same value is still referenced by `a`. This
-/// // could trigger undefined behavior if `T` is not `Copy`.
-///
-/// // Create a bitwise copy of the value at `b` in `a`.
-/// // This is safe because mutable references cannot alias.
-/// ptr::copy_nonoverlapping(b, a, 1);
-///
-/// // As above, exiting here could trigger undefined behavior because
-/// // the same value is referenced by `a` and `b`.
-///
-/// // Move `tmp` into `b`.
-/// ptr::write(b, tmp);
-/// }
-/// }
-///
-/// let mut foo = "foo".to_owned();
-/// let mut bar = "bar".to_owned();
-///
-/// swap(&mut foo, &mut bar);
-///
-/// assert_eq!(foo, "bar");
-/// assert_eq!(bar, "foo");
-/// ```
-///
-/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
-/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
-///
-/// [`read`]: ./fn.read.html
+/// Unlike `read`, the pointer may be unaligned.
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `src` must point to valid, initialized memory.
-///
-/// Additionally, if `T` is not [`Copy`], only the returned value *or* the
-/// pointed-to value can be used or dropped after calling `read_unaligned`.
-/// `read_unaligned` creates a bitwise copy of `T`, regardless of whether `T:
-/// Copy`, and this can result in undefined behavior if both copies are used.
-/// Note that `*src = foo` counts as a use because it will attempt to drop the
-/// value previously at `*src`. [`write_unaligned`] can be used to overwrite
-/// data without causing it to be dropped.
-///
-/// [`Copy`]: ../marker/trait.Copy.html
-/// [`write_unaligned`]: ./fn.write_unaligned.html
+/// Beyond accepting a raw pointer, this is unsafe because it semantically
+/// moves the value out of `src` without preventing further usage of `src`.
+/// If `T` is not `Copy`, then care must be taken to ensure that the value at
+/// `src` is not used before the data is overwritten again (e.g. with `write`,
+/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
+/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
-/// Access members of a packed struct by reference:
+/// Basic usage:
///
/// ```
-/// use std::ptr;
+/// let x = 12;
+/// let y = &x as *const i32;
///
-/// #[repr(packed, C)]
-/// #[derive(Default)]
-/// struct Packed {
-/// _padding: u8,
-/// unaligned: u32,
+/// unsafe {
+/// assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
-///
-/// let x = Packed {
-/// _padding: 0x00,
-/// unaligned: 0x01020304,
-/// };
-///
-/// let v = unsafe {
-/// // Take a reference to a 32-bit integer which is not aligned.
-/// let unaligned = &x.unaligned;
-///
-/// // Dereferencing normally will emit an unaligned load instruction,
-/// // causing undefined behavior.
-/// // let v = *unaligned; // ERROR
-///
-/// // Instead, use `read_unaligned` to read improperly aligned values.
-/// let v = ptr::read_unaligned(unaligned);
-///
-/// v
-/// };
-///
-/// // Accessing unaligned values directly is safe.
-/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
-/// `write` does not drop the contents of `dst`. This is safe, but it could leak
+/// # Safety
+///
+/// This operation is marked unsafe because it accepts a raw pointer.
+///
+/// It does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
-/// memory that has previously been [`read`] from.
-///
-/// [`read`]: ./fn.read.html
-///
-/// # Safety
+/// memory that has previously been `read` from.
///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `dst` must point to valid memory.
-///
-/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
-/// case.
-///
-/// [`write_unaligned`]: ./fn.write_unaligned.html
+/// The pointer must be aligned; use `write_unaligned` if that is not the case.
///
/// # Examples
///
/// assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
-///
-/// Manually implement [`mem::swap`]:
-///
-/// ```
-/// use std::ptr;
-///
-/// fn swap<T>(a: &mut T, b: &mut T) {
-/// unsafe {
-/// let tmp = ptr::read(a);
-/// ptr::copy_nonoverlapping(b, a, 1);
-/// ptr::write(b, tmp);
-/// }
-/// }
-///
-/// let mut foo = "foo".to_owned();
-/// let mut bar = "bar".to_owned();
-///
-/// swap(&mut foo, &mut bar);
-///
-/// assert_eq!(foo, "bar");
-/// assert_eq!(bar, "foo");
-/// ```
-///
-/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
-/// Unlike [`write`], the pointer may be unaligned.
+/// Unlike `write`, the pointer may be unaligned.
+///
+/// # Safety
///
-/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
-/// could leak allocations or resources, so care must be taken not to overwrite
-/// an object that should be dropped.
+/// This operation is marked unsafe because it accepts a raw pointer.
+///
+/// It does not drop the contents of `dst`. This is safe, but it could leak
+/// allocations or resources, so care must be taken not to overwrite an object
+/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
-/// memory that has previously been read with [`read_unaligned`].
-///
-/// [`write`]: ./fn.write.html
-/// [`read_unaligned`]: ./fn.read_unaligned.html
-///
-/// # Safety
-///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `dst` must point to valid memory.
+/// memory that has previously been read with `read_unaligned`.
///
/// # Examples
///
-/// Access fields in a packed struct:
+/// Basic usage:
///
/// ```
-/// use std::{mem, ptr};
-///
-/// #[repr(packed, C)]
-/// #[derive(Default)]
-/// struct Packed {
-/// _padding: u8,
-/// unaligned: u32,
-/// }
-///
-/// let v = 0x01020304;
-/// let mut x: Packed = unsafe { mem::zeroed() };
+/// let mut x = 0;
+/// let y = &mut x as *mut i32;
+/// let z = 12;
///
/// unsafe {
-/// // Take a reference to a 32-bit integer which is not aligned.
-/// let unaligned = &mut x.unaligned;
-///
-/// // Dereferencing normally will emit an unaligned store instruction,
-/// // causing undefined behavior.
-/// // *unaligned = v; // ERROR
-///
-/// // Instead, use `write_unaligned` to write improperly aligned values.
-/// ptr::write_unaligned(unaligned, v);
+/// std::ptr::write_unaligned(y, z);
+/// assert_eq!(std::ptr::read_unaligned(y), 12);
/// }
-///
-/// // Accessing unaligned values directly is safe.
-/// assert!(x.unaligned == v);
+/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
-/// Memory read with `read_volatile` should almost always be written to using
-/// [`write_volatile`].
-///
-/// [`write_volatile`]: ./fn.write_volatile.html
-///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// * `src` must point to valid, initialized memory.
-///
-/// * `src` must be properly aligned.
-///
-/// Like [`read`], `read_volatile` creates a bitwise copy of the pointed-to
-/// object, regardless of whether `T` is [`Copy`]. Using both values can cause
-/// undefined behavior. However, storing non-[`Copy`] data in I/O memory is
-/// almost certainly incorrect.
-///
-/// [`Copy`]: ../marker/trait.Copy.html
-/// [`read`]: ./fn.read.html
+/// Beyond accepting a raw pointer, this is unsafe because it semantically
+/// moves the value out of `src` without preventing further usage of `src`.
+/// If `T` is not `Copy`, then care must be taken to ensure that the value at
+/// `src` is not used before the data is overwritten again (e.g. with `write`,
+/// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use
+/// because it will attempt to drop the value previously at `*src`.
///
/// # Examples
///
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
-/// Memory written with `write_volatile` should almost always be read from using
-/// [`read_volatile`].
-///
-/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
-/// could leak allocations or resources, so care must be taken not to overwrite
-/// an object that should be dropped.
-///
-/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
-/// location pointed to by `dst`.
-///
-/// [`read_volatile`]: ./fn.read_volatile.html
-///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
///
/// # Safety
///
-/// Behavior is undefined if any of the following conditions are violated:
+/// This operation is marked unsafe because it accepts a raw pointer.
///
-/// * `dst` must point to valid memory.
+/// It does not drop the contents of `dst`. This is safe, but it could leak
+/// allocations or resources, so care must be taken not to overwrite an object
+/// that should be dropped.
///
-/// * `dst` must be properly aligned.
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been `read` from.
///
/// # Examples
///
CountImplied,
}
+pub struct ParseError {
+ pub description: string::String,
+ pub note: Option<string::String>,
+ pub label: string::String,
+ pub start: usize,
+ pub end: usize,
+}
+
/// The parser structure for interpreting the input format string. This is
/// modeled as an iterator over `Piece` structures to form a stream of tokens
/// being output.
input: &'a str,
cur: iter::Peekable<str::CharIndices<'a>>,
/// Error messages accumulated during parsing
- pub errors: Vec<(string::String, Option<string::String>)>,
+ pub errors: Vec<ParseError>,
/// Current position of implicit positional argument pointer
curarg: usize,
}
}
'}' => {
self.cur.next();
+ let pos = pos + 1;
if self.consume('}') {
- Some(String(self.string(pos + 1)))
+ Some(String(self.string(pos)))
} else {
- self.err_with_note("unmatched `}` found",
- "if you intended to print `}`, \
- you can escape it using `}}`");
+ self.err_with_note(
+ "unmatched `}` found",
+ "unmatched `}`",
+ "if you intended to print `}`, you can escape it using `}}`",
+ pos,
+ pos,
+ );
None
}
}
/// Notifies of an error. The message doesn't actually need to be of type
/// String, but I think it does when this eventually uses conditions so it
/// might as well start using it now.
- fn err(&mut self, msg: &str) {
- self.errors.push((msg.to_owned(), None));
+ fn err<S1: Into<string::String>, S2: Into<string::String>>(
+ &mut self,
+ description: S1,
+ label: S2,
+ start: usize,
+ end: usize,
+ ) {
+ self.errors.push(ParseError {
+ description: description.into(),
+ note: None,
+ label: label.into(),
+ start,
+ end,
+ });
}
/// Notifies of an error. The message doesn't actually need to be of type
/// String, but I think it does when this eventually uses conditions so it
/// might as well start using it now.
- fn err_with_note(&mut self, msg: &str, note: &str) {
- self.errors.push((msg.to_owned(), Some(note.to_owned())));
+ fn err_with_note<S1: Into<string::String>, S2: Into<string::String>, S3: Into<string::String>>(
+ &mut self,
+ description: S1,
+ label: S2,
+ note: S3,
+ start: usize,
+ end: usize,
+ ) {
+ self.errors.push(ParseError {
+ description: description.into(),
+ note: Some(note.into()),
+ label: label.into(),
+ start,
+ end,
+ });
}
/// Optionally consumes the specified character. If the character is not at
/// found, an error is emitted.
fn must_consume(&mut self, c: char) {
self.ws();
- if let Some(&(_, maybe)) = self.cur.peek() {
+ if let Some(&(pos, maybe)) = self.cur.peek() {
if c == maybe {
self.cur.next();
} else {
- self.err(&format!("expected `{:?}`, found `{:?}`", c, maybe));
+ self.err(format!("expected `{:?}`, found `{:?}`", c, maybe),
+ format!("expected `{}`", c),
+ pos + 1,
+ pos + 1);
}
} else {
- let msg = &format!("expected `{:?}` but string was terminated", c);
+ let msg = format!("expected `{:?}` but string was terminated", c);
+ let pos = self.input.len() + 1; // point at closing `"`
if c == '}' {
self.err_with_note(msg,
- "if you intended to print `{`, you can escape it using `{{`");
+ format!("expected `{:?}`", c),
+ "if you intended to print `{`, you can escape it using `{{`",
+ pos,
+ pos);
} else {
- self.err(msg);
+ self.err(msg, format!("expected `{:?}`", c), pos, pos);
}
}
}
} else {
match self.cur.peek() {
Some(&(_, c)) if c.is_alphabetic() => Some(ArgumentNamed(self.word())),
+ Some(&(pos, c)) if c == '_' => {
+ let invalid_name = self.string(pos);
+ self.err_with_note(format!("invalid argument name `{}`", invalid_name),
+ "invalid argument name",
+ "argument names cannot start with an underscore",
+ pos + 1, // add 1 to account for leading `{`
+ pos + 1 + invalid_name.len());
+ Some(ArgumentNamed(invalid_name))
+ },
// This is an `ArgumentNext`.
// Record the fact and do the resolution after parsing the
assert!(previous.is_none());
}
- fn closure_is_enclosed_by(&self,
- mut sub_closure: hir::ItemLocalId,
- sup_closure: hir::ItemLocalId) -> bool {
- loop {
- if sub_closure == sup_closure { return true; }
- match self.closure_tree.get(&sub_closure) {
- Some(&s) => { sub_closure = s; }
- None => { return false; }
- }
- }
- }
-
fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) {
debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime);
assert!(var != lifetime.item_local_id());
// requires a hash table lookup, and we often have very long scope
// chains (10s or 100s of scopes) that only differ by a few elements at
// the start. So this algorithm is faster.
- let mut ma = Some(scope_a);
- let mut mb = Some(scope_b);
- let mut seen_a: SmallVec<[Scope; 32]> = SmallVec::new();
- let mut seen_b: SmallVec<[Scope; 32]> = SmallVec::new();
+
+ let mut ma = Some(&scope_a);
+ let mut mb = Some(&scope_b);
+
+ // A HashSet<Scope> is a more obvious choice for these, but SmallVec is
+ // faster because the set size is normally small so linear search is
+ // as good or better than a hash table lookup, plus the size is usually
+ // small enough to avoid a heap allocation.
+ let mut seen_a: SmallVec<[&Scope; 32]> = SmallVec::new();
+ let mut seen_b: SmallVec<[&Scope; 32]> = SmallVec::new();
+
loop {
if let Some(a) = ma {
- if seen_b.iter().position(|s| *s == a).is_some() {
- return a;
+ if seen_b.iter().any(|s| *s == a) {
+ return *a;
}
seen_a.push(a);
- ma = self.parent_map.get(&a).map(|s| *s);
+ ma = self.parent_map.get(&a);
}
if let Some(b) = mb {
- if seen_a.iter().position(|s| *s == b).is_some() {
- return b;
+ if seen_a.iter().any(|s| *s == b) {
+ return *b;
}
seen_b.push(b);
- mb = self.parent_map.get(&b).map(|s| *s);
+ mb = self.parent_map.get(&b);
}
if ma.is_none() && mb.is_none() {
- break;
- }
- };
-
- fn outermost_scope(parent_map: &FxHashMap<Scope, Scope>, scope: Scope) -> Scope {
- let mut scope = scope;
- loop {
- match parent_map.get(&scope) {
- Some(&superscope) => scope = superscope,
- None => break scope,
- }
- }
- }
-
- // In this (rare) case, the two regions belong to completely different
- // functions. Compare those fn for lexical nesting. The reasoning
- // behind this is subtle. See the "Modeling closures" section of the
- // README in infer::region_constraints for more details.
- let a_root_scope = outermost_scope(&self.parent_map, scope_a);
- let b_root_scope = outermost_scope(&self.parent_map, scope_b);
- match (a_root_scope.data(), b_root_scope.data()) {
- (ScopeData::Destruction(a_root_id),
- ScopeData::Destruction(b_root_id)) => {
- if self.closure_is_enclosed_by(a_root_id, b_root_id) {
- // `a` is enclosed by `b`, hence `b` is the ancestor of everything in `a`
- scope_b
- } else if self.closure_is_enclosed_by(b_root_id, a_root_id) {
- // `b` is enclosed by `a`, hence `a` is the ancestor of everything in `b`
- scope_a
- } else {
- // neither fn encloses the other
- bug!()
- }
- }
- _ => {
- // root ids are always Node right now
- bug!()
+ // No nearest common ancestor found.
+ bug!();
}
}
}
obligation.cause.span,
infer::LateBoundRegionConversionTime::HigherRankedType,
data);
- let normalized = super::normalize_projection_type(
+ let mut obligations = vec![];
+ let normalized_ty = super::normalize_projection_type(
&mut selcx,
obligation.param_env,
data.projection_ty,
obligation.cause.clone(),
- 0
+ 0,
+ &mut obligations
);
if let Err(error) = self.at(&obligation.cause, obligation.param_env)
- .eq(normalized.value, data.ty) {
+ .eq(normalized_ty, data.ty) {
values = Some(infer::ValuePairs::Types(ExpectedFound {
- expected: normalized.value,
+ expected: normalized_ty,
found: data.ty,
}));
err_buf = error;
// FIXME(#20304) -- cache
let mut selcx = SelectionContext::new(infcx);
- let normalized = project::normalize_projection_type(&mut selcx,
- param_env,
- projection_ty,
- cause,
- 0);
-
- for obligation in normalized.obligations {
- self.register_predicate_obligation(infcx, obligation);
- }
-
- debug!("normalize_projection_type: result={:?}", normalized.value);
-
- normalized.value
+ let mut obligations = vec![];
+ let normalized_ty = project::normalize_projection_type(&mut selcx,
+ param_env,
+ projection_ty,
+ cause,
+ 0,
+ &mut obligations);
+ self.register_predicate_obligations(infcx, obligations);
+
+ debug!("normalize_projection_type: result={:?}", normalized_ty);
+
+ normalized_ty
}
/// Requires that `ty` must implement the trait with `def_id` in
debug!("project_and_unify_type(obligation={:?})",
obligation);
- let Normalized { value: normalized_ty, mut obligations } =
+ let mut obligations = vec![];
+ let normalized_ty =
match opt_normalize_projection_type(selcx,
obligation.param_env,
obligation.predicate.projection_ty,
obligation.cause.clone(),
- obligation.recursion_depth) {
+ obligation.recursion_depth,
+ &mut obligations) {
Some(n) => n,
None => return Ok(None),
};
// binder). It would be better to normalize in a
// binding-aware fashion.
- let Normalized { value: normalized_ty, obligations } =
- normalize_projection_type(self.selcx,
- self.param_env,
- data.clone(),
- self.cause.clone(),
- self.depth);
- debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?} \
- with {} add'l obligations",
- self.depth, ty, normalized_ty, obligations.len());
- self.obligations.extend(obligations);
+ let normalized_ty = normalize_projection_type(self.selcx,
+ self.param_env,
+ data.clone(),
+ self.cause.clone(),
+ self.depth,
+ &mut self.obligations);
+ debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?}, \
+ now with {} obligations",
+ self.depth, ty, normalized_ty, self.obligations.len());
normalized_ty
}
param_env: ty::ParamEnv<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
- depth: usize)
- -> NormalizedTy<'tcx>
+ depth: usize,
+ obligations: &mut Vec<PredicateObligation<'tcx>>)
+ -> Ty<'tcx>
{
- opt_normalize_projection_type(selcx, param_env, projection_ty.clone(), cause.clone(), depth)
+ opt_normalize_projection_type(selcx, param_env, projection_ty.clone(), cause.clone(), depth,
+ obligations)
.unwrap_or_else(move || {
// if we bottom out in ambiguity, create a type variable
// and a deferred predicate to resolve this when more type
});
let obligation = Obligation::with_depth(
cause, depth + 1, param_env, projection.to_predicate());
- Normalized {
- value: ty_var,
- obligations: vec![obligation]
- }
+ obligations.push(obligation);
+ ty_var
})
}
/// as Trait>::Item`. The result is always a type (and possibly
/// additional obligations). Returns `None` in the case of ambiguity,
/// which indicates that there are unbound type variables.
+///
+/// This function used to return `Option<NormalizedTy<'tcx>>`, which contains a
+/// `Ty<'tcx>` and an obligations vector. But that obligation vector was very
+/// often immediately appended to another obligations vector. So now this
+/// function takes an obligations vector and appends to it directly, which is
+/// slightly uglier but avoids the need for an extra short-lived allocation.
fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
- depth: usize)
- -> Option<NormalizedTy<'tcx>>
+ depth: usize,
+ obligations: &mut Vec<PredicateObligation<'tcx>>)
+ -> Option<Ty<'tcx>>
{
let infcx = selcx.infcx();
projection_ty);
selcx.infcx().report_overflow_error(&obligation, false);
}
- Err(ProjectionCacheEntry::NormalizedTy(mut ty)) => {
+ Err(ProjectionCacheEntry::NormalizedTy(ty)) => {
+ // This is the hottest path in this function.
+ //
// If we find the value in the cache, then return it along
// with the obligations that went along with it. Note
// that, when using a fulfillment context, these
// Once we have inferred everything we need to know, we
// can ignore the `obligations` from that point on.
if !infcx.any_unresolved_type_vars(&ty.value) {
- infcx.projection_cache.borrow_mut().complete(cache_key);
- ty.obligations = vec![];
+ infcx.projection_cache.borrow_mut().complete_normalized(cache_key, &ty);
+ // No need to extend `obligations`.
+ } else {
+ obligations.extend(ty.obligations);
}
- push_paranoid_cache_value_obligation(infcx,
- param_env,
- projection_ty,
- cause,
- depth,
- &mut ty);
-
- return Some(ty);
+ obligations.push(get_paranoid_cache_value_obligation(infcx,
+ param_env,
+ projection_ty,
+ cause,
+ depth));
+ return Some(ty.value);
}
Err(ProjectionCacheEntry::Error) => {
debug!("opt_normalize_projection_type: \
found error");
- return Some(normalize_to_error(selcx, param_env, projection_ty, cause, depth));
+ let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth);
+ obligations.extend(result.obligations);
+ return Some(result.value)
}
}
let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty);
match project_type(selcx, &obligation) {
- Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => {
+ Ok(ProjectedTy::Progress(Progress { ty: projected_ty,
+ obligations: mut projected_obligations })) => {
// if projection succeeded, then what we get out of this
// is also non-normalized (consider: it was derived from
// an impl, where-clause etc) and hence we must
debug!("opt_normalize_projection_type: \
projected_ty={:?} \
depth={} \
- obligations={:?}",
+ projected_obligations={:?}",
projected_ty,
depth,
- obligations);
+ projected_obligations);
let result = if projected_ty.has_projections() {
let mut normalizer = AssociatedTypeNormalizer::new(selcx,
normalized_ty,
depth);
- obligations.extend(normalizer.obligations);
+ projected_obligations.extend(normalizer.obligations);
Normalized {
value: normalized_ty,
- obligations,
+ obligations: projected_obligations,
}
} else {
Normalized {
value: projected_ty,
- obligations,
+ obligations: projected_obligations,
}
};
let cache_value = prune_cache_value_obligations(infcx, &result);
infcx.projection_cache.borrow_mut().insert_ty(cache_key, cache_value);
-
- Some(result)
+ obligations.extend(result.obligations);
+ Some(result.value)
}
Ok(ProjectedTy::NoProgress(projected_ty)) => {
debug!("opt_normalize_projection_type: \
obligations: vec![]
};
infcx.projection_cache.borrow_mut().insert_ty(cache_key, result.clone());
- Some(result)
+ // No need to extend `obligations`.
+ Some(result.value)
}
Err(ProjectionTyError::TooManyCandidates) => {
debug!("opt_normalize_projection_type: \
infcx.projection_cache.borrow_mut()
.error(cache_key);
- Some(normalize_to_error(selcx, param_env, projection_ty, cause, depth))
+ let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth);
+ obligations.extend(result.obligations);
+ Some(result.value)
}
}
}
/// may or may not be necessary -- in principle, all the obligations
/// that must be proven to show that `T: Trait` were also returned
/// when the cache was first populated. But there are some vague concerns,
-/// and so we take the precatuionary measure of including `T: Trait` in
+/// and so we take the precautionary measure of including `T: Trait` in
/// the result:
///
/// Concern #1. The current setup is fragile. Perhaps someone could
/// that may yet turn out to be wrong. This *may* lead to some sort
/// of trouble, though we don't have a concrete example of how that
/// can occur yet. But it seems risky at best.
-fn push_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- projection_ty: ty::ProjectionTy<'tcx>,
- cause: ObligationCause<'tcx>,
- depth: usize,
- result: &mut NormalizedTy<'tcx>)
+fn get_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>(
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize)
+ -> PredicateObligation<'tcx>
{
let trait_ref = projection_ty.trait_ref(infcx.tcx).to_poly_trait_ref();
- let trait_obligation = Obligation { cause,
- recursion_depth: depth,
- param_env,
- predicate: trait_ref.to_predicate() };
- result.obligations.push(trait_obligation);
+ Obligation {
+ cause,
+ recursion_depth: depth,
+ param_env,
+ predicate: trait_ref.to_predicate(),
+ }
}
/// If we are projecting `<T as Trait>::Item`, but `T: Trait` does not
}));
}
+    /// A specialized version of `complete` for when the key's value is known
+    /// to be a `NormalizedTy`.
+ pub fn complete_normalized(&mut self, key: ProjectionCacheKey<'tcx>, ty: &NormalizedTy<'tcx>) {
+ // We want to insert `ty` with no obligations. If the existing value
+ // already has no obligations (as is common) we can use `insert_noop`
+ // to do a minimal amount of work -- the HashMap insertion is skipped,
+ // and minimal changes are made to the undo log.
+ if ty.obligations.is_empty() {
+ self.map.insert_noop();
+ } else {
+ self.map.insert(key, ProjectionCacheEntry::NormalizedTy(Normalized {
+ value: ty.value,
+ obligations: vec![]
+ }));
+ }
+ }
+
/// Indicates that trying to normalize `key` resulted in
/// ambiguity. No point in trying it again then until we gain more
/// type information (in which case, the "fully resolved" key will
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::collections::BTreeMap;
+use indexed_vec::{Idx, IndexVec};
use std::collections::btree_map::Entry;
-use std::marker::PhantomData;
+use std::collections::BTreeMap;
use std::iter::FromIterator;
-use indexed_vec::{Idx, IndexVec};
+use std::marker::PhantomData;
type Word = u128;
const WORD_BITS: usize = 128;
if read != write {
let (bit_set_read, bit_set_write) = self.vector.pick2_mut(read, write);
- for read_val in bit_set_read.iter() {
- changed = changed | bit_set_write.insert(read_val);
+ for read_chunk in bit_set_read.chunks() {
+ changed = changed | bit_set_write.insert_chunk(read_chunk).any();
}
}
changed
}
+ /// True if `sub` is a subset of `sup`
+ pub fn is_subset(&self, sub: R, sup: R) -> bool {
+ sub == sup || {
+ let bit_set_sub = &self.vector[sub];
+ let bit_set_sup = &self.vector[sup];
+ bit_set_sub
+ .chunks()
+ .all(|read_chunk| read_chunk.bits_eq(bit_set_sup.contains_chunk(read_chunk)))
+ }
+ }
+
/// Iterates through all the columns set to true in a given row of
/// the matrix.
pub fn iter<'a>(&'a self, row: R) -> impl Iterator<Item = C> + 'a {
}
impl<I: Idx> SparseChunk<I> {
+ #[inline]
pub fn one(index: I) -> Self {
let index = index.index();
let key_usize = index / 128;
}
}
+ #[inline]
pub fn any(&self) -> bool {
self.bits != 0
}
+ #[inline]
+ pub fn bits_eq(&self, other: SparseChunk<I>) -> bool {
+ self.bits == other.bits
+ }
+
pub fn iter(&self) -> impl Iterator<Item = I> {
let base = self.key as usize * 128;
let mut bits = self.bits;
self.chunk_bits.len() * 128
}
+    /// Returns a chunk containing only those bits of `chunk` that are
+    /// already present in `self`. You can therefore test whether `self`
+    /// already contains every bit in `chunk` by checking that
+    /// `chunk.bits_eq(self.contains_chunk(chunk))`.
pub fn contains_chunk(&self, chunk: SparseChunk<I>) -> SparseChunk<I> {
SparseChunk {
bits: self.chunk_bits
}
}
+ /// Modifies `self` to contain all the bits from `chunk` (in
+ /// addition to any pre-existing bits); returns a new chunk that
+ /// contains only those bits that were newly added. You can test
+ /// if anything was inserted by invoking `any()` on the returned
+ /// value.
pub fn insert_chunk(&mut self, chunk: SparseChunk<I>) -> SparseChunk<I> {
if chunk.bits == 0 {
return chunk;
}
}
+ pub fn insert_noop(&mut self) {
+ if !self.undo_log.is_empty() {
+ self.undo_log.push(UndoLog::Noop);
+ }
+ }
+
pub fn remove(&mut self, key: K) -> bool {
match self.map.remove(&key) {
Some(old_value) => {
//! This query borrow-checks the MIR to (further) ensure it is not broken.
-use borrow_check::nll::region_infer::{RegionCausalInfo, RegionInferenceContext};
+use borrow_check::nll::region_infer::RegionInferenceContext;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
nonlexical_regioncx: regioncx,
used_mut: FxHashSet(),
used_mut_upvars: SmallVec::new(),
- nonlexical_cause_info: None,
borrow_set,
dominators,
};
/// contains the results from region inference and lets us e.g.
/// find out which CFG points are contained in each borrow region.
nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
- nonlexical_cause_info: Option<RegionCausalInfo>,
/// The set of borrows extracted from the MIR
borrow_set: Rc<BorrowSet<'tcx>>,
let regioncx = &&self.nonlexical_regioncx;
let mir = self.mir;
- if self.nonlexical_cause_info.is_none() {
- self.nonlexical_cause_info = Some(regioncx.compute_causal_info(mir));
- }
-
- let cause_info = self.nonlexical_cause_info.as_ref().unwrap();
- if let Some(cause) = cause_info.why_region_contains_point(borrow.region, context.loc) {
- match *cause.root_cause() {
+ let borrow_region_vid = regioncx.to_region_vid(borrow.region);
+ if let Some(cause) = regioncx.why_region_contains_point(borrow_region_vid, context.loc) {
+ match cause {
Cause::LiveVar(local, location) => {
match find_regular_use(mir, regioncx, borrow, location, local) {
Some(p) => {
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Module defining the `dfs` method on `RegionInferenceContext`, along with
-//! its associated helper traits.
-
-use borrow_check::nll::universal_regions::UniversalRegions;
-use borrow_check::nll::region_infer::RegionInferenceContext;
-use borrow_check::nll::region_infer::values::{RegionElementIndex, RegionValueElements,
- RegionValues};
-use syntax::codemap::Span;
-use rustc::mir::{Location, Mir};
-use rustc::ty::RegionVid;
-use rustc_data_structures::bitvec::BitVector;
-use rustc_data_structures::indexed_vec::Idx;
-
-pub(super) struct DfsStorage {
- stack: Vec<Location>,
- visited: BitVector,
-}
-
-impl<'tcx> RegionInferenceContext<'tcx> {
- /// Creates dfs storage for use by dfs; this should be shared
- /// across as many calls to dfs as possible to amortize allocation
- /// costs.
- pub(super) fn new_dfs_storage(&self) -> DfsStorage {
- let num_elements = self.elements.num_elements();
- DfsStorage {
- stack: vec![],
- visited: BitVector::new(num_elements),
- }
- }
-
- /// Function used to satisfy or test a `R1: R2 @ P`
- /// constraint. The core idea is that it performs a DFS starting
- /// from `P`. The precise actions *during* that DFS depend on the
- /// `op` supplied, so see (e.g.) `CopyFromSourceToTarget` for more
- /// details.
- ///
- /// Returns:
- ///
- /// - `Ok(true)` if the walk was completed and something changed
- /// along the way;
- /// - `Ok(false)` if the walk was completed with no changes;
- /// - `Err(early)` if the walk was existed early by `op`. `earlyelem` is the
- /// value that `op` returned.
- #[inline(never)] // ensure dfs is identifiable in profiles
- pub(super) fn dfs<C>(
- &self,
- mir: &Mir<'tcx>,
- dfs: &mut DfsStorage,
- mut op: C,
- ) -> Result<bool, C::Early>
- where
- C: DfsOp,
- {
- let mut changed = false;
-
- dfs.visited.clear();
- dfs.stack.push(op.start_point());
- while let Some(p) = dfs.stack.pop() {
- let point_index = self.elements.index(p);
-
- if !op.source_region_contains(point_index) {
- debug!(" not in from-region");
- continue;
- }
-
- if !dfs.visited.insert(point_index.index()) {
- debug!(" already visited");
- continue;
- }
-
- let new = op.add_to_target_region(point_index)?;
- changed |= new;
-
- let block_data = &mir[p.block];
-
- let start_stack_len = dfs.stack.len();
-
- if p.statement_index < block_data.statements.len() {
- dfs.stack.push(Location {
- statement_index: p.statement_index + 1,
- ..p
- });
- } else {
- dfs.stack.extend(
- block_data
- .terminator()
- .successors()
- .map(|&basic_block| Location {
- statement_index: 0,
- block: basic_block,
- }),
- );
- }
-
- if dfs.stack.len() == start_stack_len {
- // If we reach the END point in the graph, then copy
- // over any skolemized end points in the `from_region`
- // and make sure they are included in the `to_region`.
- changed |= op.add_universal_regions_outlived_by_source_to_target()?;
- }
- }
-
- Ok(changed)
- }
-}
-
-/// Customizes the operation of the `dfs` function. This function is
-/// used during inference to satisfy a `R1: R2 @ P` constraint.
-pub(super) trait DfsOp {
- /// If this op stops the walk early, what type does it propagate?
- type Early;
-
- /// Returns the point from which to start the DFS.
- fn start_point(&self) -> Location;
-
- /// Returns true if the source region contains the given point.
- fn source_region_contains(&mut self, point_index: RegionElementIndex) -> bool;
-
- /// Adds the given point to the target region, returning true if
- /// something has changed. Returns `Err` if we should abort the
- /// walk early.
- fn add_to_target_region(
- &mut self,
- point_index: RegionElementIndex,
- ) -> Result<bool, Self::Early>;
-
- /// Adds all universal regions in the source region to the target region, returning
- /// true if something has changed.
- fn add_universal_regions_outlived_by_source_to_target(&mut self) -> Result<bool, Self::Early>;
-}
-
-/// Used during inference to enforce a `R1: R2 @ P` constraint. For
-/// each point Q we reach along the DFS, we check if Q is in R2 (the
-/// "source region"). If not, we stop the walk. Otherwise, we add Q to
-/// R1 (the "target region") and continue to Q's successors. If we
-/// reach the end of the graph, then we add any universal regions from
-/// R2 into R1.
-pub(super) struct CopyFromSourceToTarget<'v> {
- pub source_region: RegionVid,
- pub target_region: RegionVid,
- pub inferred_values: &'v mut RegionValues,
- pub constraint_point: Location,
- pub constraint_span: Span,
-}
-
-impl<'v> DfsOp for CopyFromSourceToTarget<'v> {
- /// We never stop the walk early.
- type Early = !;
-
- fn start_point(&self) -> Location {
- self.constraint_point
- }
-
- fn source_region_contains(&mut self, point_index: RegionElementIndex) -> bool {
- self.inferred_values
- .contains(self.source_region, point_index)
- }
-
- fn add_to_target_region(&mut self, point_index: RegionElementIndex) -> Result<bool, !> {
- Ok(self.inferred_values.add_due_to_outlives(
- self.source_region,
- self.target_region,
- point_index,
- self.constraint_point,
- self.constraint_span,
- ))
- }
-
- fn add_universal_regions_outlived_by_source_to_target(&mut self) -> Result<bool, !> {
- Ok(self.inferred_values.add_universal_regions_outlived_by(
- self.source_region,
- self.target_region,
- self.constraint_point,
- self.constraint_span,
- ))
- }
-}
-
-/// Used after inference to *test* a `R1: R2 @ P` constraint. For
-/// each point Q we reach along the DFS, we check if Q in R2 is also
-/// contained in R1. If not, we abort the walk early with an `Err`
-/// condition. Similarly, if we reach the end of the graph and find
-/// that R1 contains some universal region that R2 does not contain,
-/// we abort the walk early.
-pub(super) struct TestTargetOutlivesSource<'v, 'tcx: 'v> {
- pub source_region: RegionVid,
- pub target_region: RegionVid,
- pub elements: &'v RegionValueElements,
- pub universal_regions: &'v UniversalRegions<'tcx>,
- pub inferred_values: &'v RegionValues,
- pub constraint_point: Location,
-}
-
-impl<'v, 'tcx> DfsOp for TestTargetOutlivesSource<'v, 'tcx> {
- /// The element that was not found within R2.
- type Early = RegionElementIndex;
-
- fn start_point(&self) -> Location {
- self.constraint_point
- }
-
- fn source_region_contains(&mut self, point_index: RegionElementIndex) -> bool {
- self.inferred_values
- .contains(self.source_region, point_index)
- }
-
- fn add_to_target_region(
- &mut self,
- point_index: RegionElementIndex,
- ) -> Result<bool, RegionElementIndex> {
- if !self.inferred_values
- .contains(self.target_region, point_index)
- {
- return Err(point_index);
- }
-
- Ok(false)
- }
-
- fn add_universal_regions_outlived_by_source_to_target(
- &mut self,
- ) -> Result<bool, RegionElementIndex> {
- // For all `ur_in_source` in `source_region`.
- for ur_in_source in self.inferred_values
- .universal_regions_outlived_by(self.source_region)
- {
- // Check that `target_region` outlives `ur_in_source`.
-
- // If `ur_in_source` is a member of `target_region`, OK.
- //
- // (This is implied by the loop below, actually, just an
- // irresistible micro-opt. Mm. Premature optimization. So
- // tasty.)
- if self.inferred_values
- .contains(self.target_region, ur_in_source)
- {
- continue;
- }
-
- // If there is some other element X such that `target_region: X` and
- // `X: ur_in_source`, OK.
- if self.inferred_values
- .universal_regions_outlived_by(self.target_region)
- .any(|ur_in_target| self.universal_regions.outlives(ur_in_target, ur_in_source))
- {
- continue;
- }
-
- // Otherwise, not known to be true.
- return Err(self.elements.index(ur_in_source));
- }
-
- Ok(false)
- }
-}
// except according to those terms.
use super::universal_regions::UniversalRegions;
+use borrow_check::nll::region_infer::values::ToElementIndex;
use rustc::hir::def_id::DefId;
+use rustc::infer::error_reporting::nice_region_error::NiceRegionError;
+use rustc::infer::region_constraints::{GenericKind, VarInfos};
use rustc::infer::InferCtxt;
use rustc::infer::NLLRegionVariableOrigin;
use rustc::infer::RegionObligation;
use rustc::infer::RegionVariableOrigin;
use rustc::infer::SubregionOrigin;
-use rustc::infer::error_reporting::nice_region_error::NiceRegionError;
-use rustc::infer::region_constraints::{GenericKind, VarInfos};
-use rustc::mir::{ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements,
- Local, Location, Mir};
+use rustc::mir::{
+ ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements, Local, Location,
+ Mir,
+};
use rustc::traits::ObligationCause;
use rustc::ty::{self, RegionVid, Ty, TypeFoldable};
use rustc::util::common::{self, ErrorReported};
use syntax_pos::Span;
mod annotation;
-mod dfs;
-use self::dfs::{CopyFromSourceToTarget, TestTargetOutlivesSource};
mod dump_mir;
mod graphviz;
mod values;
/// NB: The variants in `Cause` are intentionally ordered. Lower
/// values are preferred when it comes to error messages. Do not
/// reorder willy nilly.
-#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub(crate) enum Cause {
/// point inserted because Local was live at the given Location
LiveVar(Local, Location),
/// part of the initial set of values for a universally quantified region
UniversalRegion(RegionVid),
-
- /// Element E was added to R because there was some
- /// outlives obligation `R: R1 @ P` and `R1` contained `E`.
- Outlives {
- /// the reason that R1 had E
- original_cause: Rc<Cause>,
-
- /// the point P from the relation
- constraint_location: Location,
-
- /// The span indicating why we added the outlives constraint.
- constraint_span: Span,
- },
-}
-
-pub(crate) struct RegionCausalInfo {
- inferred_values: RegionValues,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
// Add all nodes in the CFG to liveness constraints
for point_index in self.elements.all_point_indices() {
- self.liveness_constraints.add(
+ self.liveness_constraints.add_element(
variable,
point_index,
&Cause::UniversalRegion(variable),
}
// Add `end(X)` into the set for X.
- self.liveness_constraints
- .add(variable, variable, &Cause::UniversalRegion(variable));
+ self.liveness_constraints.add_element(
+ variable,
+ variable,
+ &Cause::UniversalRegion(variable),
+ );
}
}
debug!("add_live_point: @{:?} Adding cause {:?}", point, cause);
let element = self.elements.index(point);
- if self.liveness_constraints.add(v, element, &cause) {
+ if self.liveness_constraints.add_element(v, element, &cause) {
true
} else {
false
) -> Option<ClosureRegionRequirements<'gcx>> {
assert!(self.inferred_values.is_none(), "values already inferred");
- let dfs_storage = &mut self.new_dfs_storage();
-
- self.propagate_constraints(mir, dfs_storage);
+ self.propagate_constraints(mir);
// If this is a closure, we can propagate unsatisfied
// `outlives_requirements` to our creator, so create a vector
None
};
- self.check_type_tests(
- infcx,
- mir,
- dfs_storage,
- mir_def_id,
- outlives_requirements.as_mut(),
- );
+ self.check_type_tests(infcx, mir, mir_def_id, outlives_requirements.as_mut());
self.check_universal_regions(infcx, mir_def_id, outlives_requirements.as_mut());
}
}
- /// Re-execute the region inference, this time tracking causal information.
- /// This is significantly slower, so it is done only when an error is being reported.
- pub(super) fn compute_causal_info(&self, mir: &Mir<'tcx>) -> RegionCausalInfo {
- let dfs_storage = &mut self.new_dfs_storage();
- let inferred_values = self.compute_region_values(mir, dfs_storage, TrackCauses(true));
- RegionCausalInfo { inferred_values }
- }
-
/// Propagate the region constraints: this will grow the values
/// for each region variable until all the constraints are
/// satisfied. Note that some values may grow **too** large to be
/// feasible, but we check this later.
- fn propagate_constraints(&mut self, mir: &Mir<'tcx>, dfs_storage: &mut dfs::DfsStorage) {
+ fn propagate_constraints(&mut self, mir: &Mir<'tcx>) {
self.dependency_map = Some(self.build_dependency_map());
- let inferred_values = self.compute_region_values(mir, dfs_storage, TrackCauses(false));
+ let inferred_values = self.compute_region_values(mir);
self.inferred_values = Some(inferred_values);
}
#[inline(never)] // ensure dfs is identifiable in profiles
- fn compute_region_values(
- &self,
- mir: &Mir<'tcx>,
- dfs_storage: &mut dfs::DfsStorage,
- track_causes: TrackCauses,
- ) -> RegionValues {
+ fn compute_region_values(&self, _mir: &Mir<'tcx>) -> RegionValues {
debug!("compute_region_values()");
debug!("compute_region_values: constraints={:#?}", {
let mut constraints: Vec<_> = self.constraints.iter().collect();
// The initial values for each region are derived from the liveness
// constraints we have accumulated.
- let mut inferred_values = self.liveness_constraints.duplicate(track_causes);
+ let mut inferred_values = self.liveness_constraints.duplicate(TrackCauses(false));
let dependency_map = self.dependency_map.as_ref().unwrap();
let constraint = &self.constraints[constraint_idx];
debug!("propagate_constraints: constraint={:?}", constraint);
- // Grow the value as needed to accommodate the
- // outlives constraint.
- let Ok(made_changes) = self.dfs(
- mir,
- dfs_storage,
- CopyFromSourceToTarget {
- source_region: constraint.sub,
- target_region: constraint.sup,
- inferred_values: &mut inferred_values,
- constraint_point: constraint.point,
- constraint_span: constraint.span,
- },
- );
-
- if made_changes {
+ if inferred_values.add_region(constraint.sup, constraint.sub) {
debug!("propagate_constraints: sub={:?}", constraint.sub);
debug!("propagate_constraints: sup={:?}", constraint.sup);
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
mir: &Mir<'tcx>,
- dfs_storage: &mut dfs::DfsStorage,
mir_def_id: DefId,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'gcx>>>,
) {
for type_test in &self.type_tests {
debug!("check_type_test: {:?}", type_test);
- if self.eval_region_test(
- mir,
- dfs_storage,
- type_test.point,
- type_test.lower_bound,
- &type_test.test,
- ) {
+ if self.eval_region_test(mir, type_test.point, type_test.lower_bound, &type_test.test) {
continue;
}
fn eval_region_test(
&self,
mir: &Mir<'tcx>,
- dfs_storage: &mut dfs::DfsStorage,
point: Location,
lower_bound: RegionVid,
test: &RegionTest,
match test {
RegionTest::IsOutlivedByAllRegionsIn(regions) => regions
.iter()
- .all(|&r| self.eval_outlives(mir, dfs_storage, r, lower_bound, point)),
+ .all(|&r| self.eval_outlives(mir, r, lower_bound, point)),
RegionTest::IsOutlivedByAnyRegionIn(regions) => regions
.iter()
- .any(|&r| self.eval_outlives(mir, dfs_storage, r, lower_bound, point)),
+ .any(|&r| self.eval_outlives(mir, r, lower_bound, point)),
RegionTest::Any(tests) => tests
.iter()
- .any(|test| self.eval_region_test(mir, dfs_storage, point, lower_bound, test)),
+ .any(|test| self.eval_region_test(mir, point, lower_bound, test)),
RegionTest::All(tests) => tests
.iter()
- .all(|test| self.eval_region_test(mir, dfs_storage, point, lower_bound, test)),
+ .all(|test| self.eval_region_test(mir, point, lower_bound, test)),
}
}
// Evaluate whether `sup_region: sub_region @ point`.
fn eval_outlives(
&self,
- mir: &Mir<'tcx>,
- dfs_storage: &mut dfs::DfsStorage,
+ _mir: &Mir<'tcx>,
sup_region: RegionVid,
sub_region: RegionVid,
point: Location,
sup_region, sub_region, point
);
- // Roughly speaking, do a DFS of all region elements reachable
- // from `point` contained in `sub_region`. If any of those are
- // *not* present in `sup_region`, the DFS will abort early and
- // yield an `Err` result.
- match self.dfs(
- mir,
- dfs_storage,
- TestTargetOutlivesSource {
- source_region: sub_region,
- target_region: sup_region,
- constraint_point: point,
- elements: &self.elements,
- universal_regions: &self.universal_regions,
- inferred_values: self.inferred_values.as_ref().unwrap(),
- },
- ) {
- Ok(_) => {
- debug!("eval_outlives: true");
- true
- }
+ let inferred_values = self.inferred_values
+ .as_ref()
+ .expect("values for regions not yet inferred");
- Err(elem) => {
- debug!(
- "eval_outlives: false because `{:?}` is not present in `{:?}`",
- self.elements.to_element(elem),
- sup_region
- );
- false
- }
+ debug!(
+ "eval_outlives: sup_region's value = {:?}",
+ inferred_values.region_value_str(sup_region),
+ );
+ debug!(
+ "eval_outlives: sub_region's value = {:?}",
+ inferred_values.region_value_str(sub_region),
+ );
+
+ // Both the `sub_region` and `sup_region` consist of the union
+ // of some number of universal regions (along with the union
+ // of various points in the CFG; ignore those points for
+ // now). Therefore, the sup-region outlives the sub-region if,
+ // for each universal region R1 in the sub-region, there
+ // exists some region R2 in the sup-region that outlives R1.
+ let universal_outlives = inferred_values
+ .universal_regions_outlived_by(sub_region)
+ .all(|r1| {
+ inferred_values
+ .universal_regions_outlived_by(sup_region)
+ .any(|r2| self.universal_regions.outlives(r2, r1))
+ });
+
+ if !universal_outlives {
+ return false;
+ }
+
+ // Now we have to compare all the points in the sub region and make
+ // sure they exist in the sup region.
+
+ if self.universal_regions.is_universal_region(sup_region) {
+ // Micro-opt: universal regions contain all points.
+ return true;
}
+
+ inferred_values.contains_points(sup_region, sub_region)
}
/// Once regions have been propagated, this method is used to see
longer_fr, shorter_fr,
);
- let blame_span = self.blame_span(longer_fr, shorter_fr);
+ let blame_index = self.blame_constraint(longer_fr, shorter_fr);
+ let blame_span = self.constraints[blame_index].span;
if let Some(propagated_outlives_requirements) = propagated_outlives_requirements {
// Shrink `fr` until we find a non-local region (if we do).
diag.emit();
}
+ crate fn why_region_contains_point(&self, fr1: RegionVid, elem: Location) -> Option<Cause> {
+ // Find some constraint `X: Y` where:
+ // - `fr1: X` transitively
+ // - and `Y` is live at `elem`
+ let index = self.blame_constraint(fr1, elem);
+ let region_sub = self.constraints[index].sub;
+
+ // then return why `Y` was live at `elem`
+ self.liveness_constraints.cause(region_sub, elem)
+ }
+
/// Tries to finds a good span to blame for the fact that `fr1`
/// contains `fr2`.
- fn blame_span(&self, fr1: RegionVid, fr2: RegionVid) -> Span {
+ fn blame_constraint(&self, fr1: RegionVid, elem: impl ToElementIndex) -> ConstraintIndex {
// Find everything that influenced final value of `fr`.
let influenced_fr1 = self.dependencies(fr1);
// of dependencies, which doesn't account for the locations of
// constraints at all. But it will do for now.
let relevant_constraint = self.constraints
- .iter()
- .filter_map(|constraint| {
- if constraint.sub != fr2 {
- None
- } else {
- influenced_fr1[constraint.sup]
- .map(|distance| (distance, constraint.span))
- }
- })
- .min() // constraining fr1 with fewer hops *ought* to be more obvious
- .map(|(_dist, span)| span);
+ .iter_enumerated()
+ .filter_map(|(i, constraint)| {
+ if !self.liveness_constraints.contains(constraint.sub, elem) {
+ None
+ } else {
+ influenced_fr1[constraint.sup]
+ .map(|distance| (distance, i))
+ }
+ })
+ .min() // constraining fr1 with fewer hops *ought* to be more obvious
+ .map(|(_dist, i)| i);
relevant_constraint.unwrap_or_else(|| {
bug!(
"could not find any constraint to blame for {:?}: {:?}",
fr1,
- fr2
+ elem,
);
})
}
}
}
-impl RegionCausalInfo {
- /// Returns the *reason* that the region `r` contains the given point.
- pub(super) fn why_region_contains_point<R>(&self, r: R, p: Location) -> Option<Rc<Cause>>
- where
- R: ToRegionVid,
- {
- self.inferred_values.cause(r.to_region_vid(), p)
- }
-}
-
impl<'tcx> RegionDefinition<'tcx> {
fn new(origin: RegionVariableOrigin) -> Self {
// Create a new region definition. Note that, for free
})
}
}
-
-trait CauseExt {
- fn outlives(&self, constraint_location: Location, constraint_span: Span) -> Cause;
-}
-
-impl CauseExt for Rc<Cause> {
- /// Creates a derived cause due to an outlives constraint.
- fn outlives(&self, constraint_location: Location, constraint_span: Span) -> Cause {
- Cause::Outlives {
- original_cause: self.clone(),
- constraint_location,
- constraint_span,
- }
- }
-}
-
-impl Cause {
- pub(crate) fn root_cause(&self) -> &Cause {
- match self {
- Cause::LiveVar(..)
- | Cause::DropVar(..)
- | Cause::LiveOther(..)
- | Cause::UniversalRegion(..) => self,
-
- Cause::Outlives { original_cause, .. } => original_cause.root_cause(),
- }
- }
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::rc::Rc;
+use borrow_check::nll::region_infer::TrackCauses;
+use rustc::mir::{BasicBlock, Location, Mir};
+use rustc::ty::RegionVid;
use rustc_data_structures::bitvec::SparseBitMatrix;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::indexed_vec::IndexVec;
-use rustc::mir::{BasicBlock, Location, Mir};
-use rustc::ty::{self, RegionVid};
-use syntax::codemap::Span;
+use std::fmt::Debug;
+use std::rc::Rc;
-use super::{Cause, CauseExt, TrackCauses};
+use super::Cause;
/// Maps between the various kinds of elements of a region value to
/// the internal indices that we use.
(0..self.num_points).map(move |i| RegionElementIndex::new(i + self.num_universal_regions))
}
- /// Iterates over the `RegionElementIndex` for all points in the CFG.
- pub(super) fn all_universal_region_indices(&self) -> impl Iterator<Item = RegionElementIndex> {
- (0..self.num_universal_regions).map(move |i| RegionElementIndex::new(i))
- }
-
/// Converts a particular `RegionElementIndex` to the `RegionElement` it represents.
pub(super) fn to_element(&self, i: RegionElementIndex) -> RegionElement {
debug!("to_element(i={:?})", i);
UniversalRegion(RegionVid),
}
-pub(super) trait ToElementIndex {
+pub(super) trait ToElementIndex: Debug + Copy {
fn to_element_index(self, elements: &RegionValueElements) -> RegionElementIndex;
}
causes: Option<CauseMap>,
}
-type CauseMap = FxHashMap<(RegionVid, RegionElementIndex), Rc<Cause>>;
+type CauseMap = FxHashMap<(RegionVid, RegionElementIndex), Cause>;
impl RegionValues {
/// Creates a new set of "region values" that tracks causal information.
/// Adds the given element to the value for the given region. Returns true if
/// the element is newly added (i.e., was not already present).
- pub(super) fn add<E: ToElementIndex>(&mut self, r: RegionVid, elem: E, cause: &Cause) -> bool {
+ pub(super) fn add_element<E: ToElementIndex>(
+ &mut self,
+ r: RegionVid,
+ elem: E,
+ cause: &Cause,
+ ) -> bool {
let i = self.elements.index(elem);
self.add_internal(r, i, |_| cause.clone())
}
+ /// Add all elements in `r_from` to `r_to` (because e.g. `r_to:
+ /// r_from`).
+ pub(super) fn add_region(&mut self, r_to: RegionVid, r_from: RegionVid) -> bool {
+ self.matrix.merge(r_from, r_to)
+ }
+
/// Internal method to add an element to a region.
///
/// Takes a "lazy" cause -- this function will return the cause, but it will only
debug!("add(r={:?}, i={:?})", r, self.elements.to_element(i));
if let Some(causes) = &mut self.causes {
- let cause = Rc::new(make_cause(causes));
+ let cause = make_cause(causes);
causes.insert((r, i), cause);
}
// #49998: compare using root cause alone to avoid
// useless traffic from similar outlives chains.
- let overwrite = if ty::tls::with(|tcx| {
- tcx.sess.opts.debugging_opts.nll_subminimal_causes
- }) {
- cause.root_cause() < old_cause.root_cause()
- } else {
- cause < **old_cause
- };
- if overwrite {
- *old_cause = Rc::new(cause);
+ if cause < *old_cause {
+ *old_cause = cause;
return true;
}
}
}
}
- /// Adds `elem` to `to_region` because of a relation:
- ///
- /// to_region: from_region @ constraint_location
- ///
- /// that was added by the cod at `constraint_span`.
- pub(super) fn add_due_to_outlives<T: ToElementIndex>(
- &mut self,
- from_region: RegionVid,
- to_region: RegionVid,
- elem: T,
- constraint_location: Location,
- constraint_span: Span,
- ) -> bool {
- let elem = self.elements.index(elem);
- self.add_internal(to_region, elem, |causes| {
- causes[&(from_region, elem)].outlives(constraint_location, constraint_span)
- })
- }
-
- /// Adds all the universal regions outlived by `from_region` to
- /// `to_region`.
- pub(super) fn add_universal_regions_outlived_by(
- &mut self,
- from_region: RegionVid,
- to_region: RegionVid,
- constraint_location: Location,
- constraint_span: Span,
- ) -> bool {
- // We could optimize this by improving `SparseBitMatrix::merge` so
- // it does not always merge an entire row. That would
- // complicate causal tracking though.
- debug!(
- "add_universal_regions_outlived_by(from_region={:?}, to_region={:?})",
- from_region, to_region
- );
- let mut changed = false;
- for elem in self.elements.all_universal_region_indices() {
- if self.contains(from_region, elem) {
- changed |= self.add_due_to_outlives(
- from_region,
- to_region,
- elem,
- constraint_location,
- constraint_span,
- );
- }
- }
- changed
- }
-
/// True if the region `r` contains the given element.
pub(super) fn contains<E: ToElementIndex>(&self, r: RegionVid, elem: E) -> bool {
let i = self.elements.index(elem);
self.matrix.contains(r, i)
}
+ /// True if `sup_region` contains all the CFG points that
+ /// `sub_region` contains. Ignores universal regions.
+ pub(super) fn contains_points(&self, sup_region: RegionVid, sub_region: RegionVid) -> bool {
+ // This could be done faster by comparing the bitsets. But I
+ // am lazy.
+ self.element_indices_contained_in(sub_region)
+ .skip_while(|&i| self.elements.to_universal_region(i).is_some())
+ .all(|e| self.contains(sup_region, e))
+ }
+
/// Iterate over the value of the region `r`, yielding up element
/// indices. You may prefer `universal_regions_outlived_by` or
/// `elements_contained_in`.
///
/// Returns None if cause tracking is disabled or `elem` is not
/// actually found in `r`.
- pub(super) fn cause<T: ToElementIndex>(&self, r: RegionVid, elem: T) -> Option<Rc<Cause>> {
+ pub(super) fn cause<T: ToElementIndex>(&self, r: RegionVid, elem: T) -> Option<Cause> {
let index = self.elements.index(elem);
if let Some(causes) = &self.causes {
causes.get(&(r, index)).cloned()
// except according to those terms.
use rustc::infer::canonical::{Canonical, QueryResult};
-use rustc::traits::{self, FulfillmentContext, Normalized, ObligationCause,
- SelectionContext};
+use rustc::traits::{self, FulfillmentContext, ObligationCause, SelectionContext};
use rustc::traits::query::{CanonicalProjectionGoal, NoSolution, normalize::NormalizationResult};
use rustc::ty::{ParamEnvAnd, TyCtxt};
use rustc_data_structures::sync::Lrc;
let fulfill_cx = &mut FulfillmentContext::new();
let selcx = &mut SelectionContext::new(infcx);
let cause = ObligationCause::misc(DUMMY_SP, DUMMY_NODE_ID);
- let Normalized {
- value: answer,
- obligations,
- } = traits::normalize_projection_type(selcx, param_env, goal, cause, 0);
+ let mut obligations = vec![];
+ let answer =
+ traits::normalize_projection_type(selcx, param_env, goal, cause, 0, &mut obligations);
fulfill_cx.register_predicate_obligations(infcx, obligations);
// Now that we have fulfilled as much as we can, create a solution
}
let mut selcx = traits::SelectionContext::new(self.fcx);
- let normalized = traits::normalize_projection_type(&mut selcx,
- self.fcx.param_env,
- ty::ProjectionTy::from_ref_and_name(
- tcx,
- trait_ref,
- Symbol::intern("Target"),
- ),
- cause,
- 0);
-
- debug!("overloaded_deref_ty({:?}) = {:?}", ty, normalized);
- self.obligations.extend(normalized.obligations);
-
- Some(self.fcx.resolve_type_vars_if_possible(&normalized.value))
+ let normalized_ty = traits::normalize_projection_type(&mut selcx,
+ self.fcx.param_env,
+ ty::ProjectionTy::from_ref_and_name(
+ tcx,
+ trait_ref,
+ Symbol::intern("Target"),
+ ),
+ cause,
+ 0,
+ &mut self.obligations);
+
+ debug!("overloaded_deref_ty({:?}) = {:?}", ty, normalized_ty);
+
+ Some(self.fcx.resolve_type_vars_if_possible(&normalized_ty))
}
/// Returns the final type, generating an error if it is an
debug!(">>> block comment");
let p = rdr.pos;
let mut lines: Vec<String> = Vec::new();
- let col = rdr.col;
+
+ // Count the number of chars since the start of the line by rescanning.
+ let mut src_index = rdr.src_index(rdr.filemap.line_begin_pos());
+ let end_src_index = rdr.src_index(rdr.pos);
+ assert!(src_index <= end_src_index);
+ let mut n = 0;
+ while src_index < end_src_index {
+ let c = char_at(&rdr.src, src_index);
+ src_index += c.len_utf8();
+ n += 1;
+ }
+ let col = CharPos(n);
+
rdr.bump();
rdr.bump();
pub next_pos: BytePos,
/// The absolute offset within the codemap of the current character
pub pos: BytePos,
- /// The column of the next character to read
- pub col: CharPos,
/// The current character (which has been read from self.pos)
pub ch: Option<char>,
pub filemap: Lrc<syntax_pos::FileMap>,
- /// If Some, stop reading the source at this position (inclusive).
- pub terminator: Option<BytePos>,
+ /// Stop reading src at this index.
+ pub end_src_index: usize,
/// Whether to record new-lines and multibyte chars in filemap.
/// This is only necessary the first time a filemap is lexed.
/// If part of a filemap is being re-lexed, this should be set to false.
pub fatal_errs: Vec<DiagnosticBuilder<'a>>,
// cache a direct reference to the source text, so that we don't have to
// retrieve it via `self.filemap.src.as_ref().unwrap()` all the time.
- source_text: Lrc<String>,
+ src: Lrc<String>,
/// Stack of open delimiters and their spans. Used for error message.
token: token::Token,
span: Span,
self.unwrap_or_abort(res)
}
fn is_eof(&self) -> bool {
- if self.ch.is_none() {
- return true;
- }
-
- match self.terminator {
- Some(t) => self.next_pos > t,
- None => false,
- }
+ self.ch.is_none()
}
/// Return the next token. EFFECT: advances the string_reader.
pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
filemap.name));
}
- let source_text = (*filemap.src.as_ref().unwrap()).clone();
+ let src = (*filemap.src.as_ref().unwrap()).clone();
StringReader {
sess,
next_pos: filemap.start_pos,
pos: filemap.start_pos,
- col: CharPos(0),
ch: Some('\n'),
filemap,
- terminator: None,
+ end_src_index: src.len(),
save_new_lines_and_multibyte: true,
// dummy values; not read
peek_tok: token::Eof,
peek_span: syntax_pos::DUMMY_SP,
- source_text,
+ src,
fatal_errs: Vec::new(),
token: token::Eof,
span: syntax_pos::DUMMY_SP,
// Seek the lexer to the right byte range.
sr.save_new_lines_and_multibyte = false;
sr.next_pos = span.lo();
- sr.terminator = Some(span.hi());
+ sr.end_src_index = sr.src_index(span.hi());
sr.bump();
/// offending string to the error message
fn fatal_span_verbose(&self, from_pos: BytePos, to_pos: BytePos, mut m: String) -> FatalError {
m.push_str(": ");
- let from = self.byte_offset(from_pos).to_usize();
- let to = self.byte_offset(to_pos).to_usize();
- m.push_str(&self.source_text[from..to]);
+ m.push_str(&self.src[self.src_index(from_pos)..self.src_index(to_pos)]);
self.fatal_span_(from_pos, to_pos, &m[..])
}
Ok(())
}
- fn byte_offset(&self, pos: BytePos) -> BytePos {
- (pos - self.filemap.start_pos)
+ #[inline]
+ fn src_index(&self, pos: BytePos) -> usize {
+ (pos - self.filemap.start_pos).to_usize()
}
/// Calls `f` with a string slice of the source text spanning from `start`
fn with_str_from_to<T, F>(&self, start: BytePos, end: BytePos, f: F) -> T
where F: FnOnce(&str) -> T
{
- f(&self.source_text[self.byte_offset(start).to_usize()..self.byte_offset(end).to_usize()])
+ f(&self.src[self.src_index(start)..self.src_index(end)])
}
/// Converts CRLF to LF in the given string, raising an error on bare CR.
}
}
-
/// Advance the StringReader by one character. If a newline is
/// discovered, add it to the FileMap's list of line start offsets.
pub fn bump(&mut self) {
- let new_pos = self.next_pos;
- let new_byte_offset = self.byte_offset(new_pos).to_usize();
- let end = self.terminator.map_or(self.source_text.len(), |t| {
- self.byte_offset(t).to_usize()
- });
- if new_byte_offset < end {
- let old_ch_is_newline = self.ch.unwrap() == '\n';
- let new_ch = char_at(&self.source_text, new_byte_offset);
- let new_ch_len = new_ch.len_utf8();
-
- self.ch = Some(new_ch);
- self.pos = new_pos;
- self.next_pos = new_pos + Pos::from_usize(new_ch_len);
- if old_ch_is_newline {
+ let next_src_index = self.src_index(self.next_pos);
+ if next_src_index < self.end_src_index {
+ let next_ch = char_at(&self.src, next_src_index);
+ let next_ch_len = next_ch.len_utf8();
+
+ if self.ch.unwrap() == '\n' {
if self.save_new_lines_and_multibyte {
- self.filemap.next_line(self.pos);
+ self.filemap.next_line(self.next_pos);
}
- self.col = CharPos(0);
- } else {
- self.col = self.col + CharPos(1);
}
- if new_ch_len > 1 {
+ if next_ch_len > 1 {
if self.save_new_lines_and_multibyte {
- self.filemap.record_multibyte_char(self.pos, new_ch_len);
+ self.filemap.record_multibyte_char(self.next_pos, next_ch_len);
}
}
- self.filemap.record_width(self.pos, new_ch);
+ self.filemap.record_width(self.next_pos, next_ch);
+
+ self.ch = Some(next_ch);
+ self.pos = self.next_pos;
+ self.next_pos = self.next_pos + Pos::from_usize(next_ch_len);
} else {
self.ch = None;
- self.pos = new_pos;
+ self.pos = self.next_pos;
}
}
pub fn nextch(&self) -> Option<char> {
- let offset = self.byte_offset(self.next_pos).to_usize();
- if offset < self.source_text.len() {
- Some(char_at(&self.source_text, offset))
+ let next_src_index = self.src_index(self.next_pos);
+ if next_src_index < self.end_src_index {
+ Some(char_at(&self.src, next_src_index))
} else {
None
}
}
pub fn nextnextch(&self) -> Option<char> {
- let offset = self.byte_offset(self.next_pos).to_usize();
- let s = &self.source_text[..];
- if offset >= s.len() {
- return None;
- }
- let next = offset + char_at(s, offset).len_utf8();
- if next < s.len() {
- Some(char_at(s, next))
- } else {
- None
+ let next_src_index = self.src_index(self.next_pos);
+ if next_src_index < self.end_src_index {
+ let next_next_src_index =
+ next_src_index + char_at(&self.src, next_src_index).len_utf8();
+ if next_next_src_index < self.end_src_index {
+ return Some(char_at(&self.src, next_next_src_index));
+ }
}
+ None
}
pub fn nextnextch_is(&self, c: char) -> bool {
loop {
self.bump();
if self.ch_is('\'') {
- let start = self.byte_offset(start).to_usize();
- let end = self.byte_offset(self.pos).to_usize();
+ let start = self.src_index(start);
+ let end = self.src_index(self.pos);
self.bump();
let span = self.mk_sp(start_with_quote, self.pos);
self.sess.span_diagnostic
.span_suggestion(span,
"if you meant to write a `str` literal, \
use double quotes",
- format!("\"{}\"",
- &self.source_text[start..end]))
+ format!("\"{}\"", &self.src[start..end]))
.emit();
return Ok(token::Literal(token::Str_(Symbol::intern("??")), None))
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#[inline]
pub fn char_at(s: &str, byte: usize) -> char {
s[byte..].chars().next().unwrap()
}
}
if !parser.errors.is_empty() {
- let (err, note) = parser.errors.remove(0);
- let mut e = cx.ecx.struct_span_err(cx.fmtsp, &format!("invalid format string: {}", err));
- if let Some(note) = note {
+ let err = parser.errors.remove(0);
+ let sp = cx.fmtsp.from_inner_byte_pos(err.start, err.end);
+ let mut e = cx.ecx.struct_span_err(sp, &format!("invalid format string: {}",
+ err.description));
+ e.span_label(sp, err.label + " in format string");
+ if let Some(note) = err.note {
e.note(¬e);
}
e.emit();
)
}
+ pub fn from_inner_byte_pos(self, start: usize, end: usize) -> Span {
+ let span = self.data();
+ Span::new(span.lo + BytePos::from_usize(start),
+ span.lo + BytePos::from_usize(end),
+ span.ctxt)
+ }
+
#[inline]
pub fn apply_mark(self, mark: Mark) -> Span {
let span = self.data();
lines.push(pos);
}
+ /// Return the BytePos of the beginning of the current line.
+ pub fn line_begin_pos(&self) -> BytePos {
+ let lines = self.lines.borrow();
+ match lines.last() {
+ Some(&line_pos) => line_pos,
+ None => self.start_pos,
+ }
+ }
+
/// Add externally loaded source.
/// If the hash of the input doesn't match or no input is supplied via None,
/// it is interpreted as an error and the corresponding enum variant is set.
self.multibyte_chars.borrow_mut().push(mbc);
}
+ #[inline]
pub fn record_width(&self, pos: BytePos, ch: char) {
let width = match ch {
'\t' =>
for &(ref f, ref stdout) in &state.not_failures {
successes.push(f.name.to_string());
if !stdout.is_empty() {
- stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
+ stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
let output = String::from_utf8_lossy(stdout);
stdouts.push_str(&output);
stdouts.push_str("\n");
for &(ref f, ref stdout) in &state.failures {
failures.push(f.name.to_string());
if !stdout.is_empty() {
- fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
+ fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
let output = String::from_utf8_lossy(stdout);
fail_out.push_str(&output);
fail_out.push_str("\n");
for &(ref f, ref stdout) in &state.not_failures {
successes.push(f.name.to_string());
if !stdout.is_empty() {
- stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
+ stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
let output = String::from_utf8_lossy(stdout);
stdouts.push_str(&output);
stdouts.push_str("\n");
for &(ref f, ref stdout) in &state.failures {
failures.push(f.name.to_string());
if !stdout.is_empty() {
- fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
+ fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
let output = String::from_utf8_lossy(stdout);
fail_out.push_str(&output);
fail_out.push_str("\n");
### Editing and updating the reference files
If you have changed the compiler's output intentionally, or you are
-making a new test, you can use the script `ui/update-references.sh` to
-update the references. When you run the test framework, it will report
-various errors: in those errors is a command you can use to run the
-`ui/update-references.sh` script, which will then copy over the files
-from the build directory and use them as the new reference. You can
-also just run `ui/update-all-references.sh`. In both cases, you can run
-the script with `--help` to get a help message.
+making a new test, you can pass `--bless` to the command you used to
+run the tests. This will then copy over the files
+from the build directory and use them as the new reference.
### Normalization
// borrows in `&v[0]` and `&v[1]` each (in theory) have to outlive R3,
// but only at a particular point, and hence they wind up including
// distinct regions.
+//
+// FIXME(#43234) -- Well, this used to be true, but we modified NLL
+// for the time being to not take location into account.
// compile-flags:-Zborrowck=mir -Zverbose
// ^^^^^^^^^ force compiler to dump more region information
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#2r | {bb2[0..=1], bb3[0..=1]}
+// | '_#2r | {bb2[0..=1], bb3[0..=1], bb8[2..=4]}
// ...
-// | '_#4r | {bb8[1..=4]}
+// | '_#4r | {bb2[1], bb3[0..=1], bb8[1..=4]}
// | '_#5r | {bb2[1], bb3[0..=1], bb8[2..=4]}
// ...
// let mut _2: &'_#5r usize;
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(nll)]
-
-use std::collections::HashMap;
-
-fn get_default(map: &mut HashMap<usize, String>, key: usize) -> &mut String {
- match map.get_mut(&key) {
- Some(value) => value,
- None => {
- map.insert(key, "".to_string());
- map.get_mut(&key).unwrap()
- }
- }
-}
-
-fn main() {
- let map = &mut HashMap::new();
- map.insert(22, format!("Hello, world"));
- map.insert(44, format!("Goodbye, world"));
- assert_eq!(&*get_default(map, 22), "Hello, world");
- assert_eq!(&*get_default(map, 66), "");
-}
--- /dev/null
+error[E0508]: cannot move out of type `[NonCopy; 1]`, a non-copy array
+ --> $DIR/E0508.rs:18:18
+ |
+LL | let _value = array[0]; //[ast]~ ERROR [E0508]
+ | ^^^^^^^^ cannot move out of here
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0508`.
--- /dev/null
+error[E0508]: cannot move out of type `[NonCopy; 1]`, a non-copy array
+ --> $DIR/E0508.rs:18:18
+ |
+LL | let _value = array[0]; //[ast]~ ERROR [E0508]
+ | ^^^^^^^^
+ | |
+ | cannot move out of here
+ | help: consider using a reference instead: `&array[0]`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0508`.
--- /dev/null
+error[E0508]: cannot move out of type `[NonCopy; 1]`, a non-copy array
+ --> $DIR/E0508.rs:18:18
+ |
+LL | let _value = array[0]; //[ast]~ ERROR [E0508]
+ | ^^^^^^^^ cannot move out of here
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0508`.
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: ast mir
+//[mir]compile-flags: -Z borrowck=mir
+
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ let _value = array[0]; //[ast]~ ERROR [E0508]
+ //[mir]~^ ERROR [E0508]
+}
--> $DIR/mut-borrow-in-loop.rs:20:25
|
LL | (self.func)(arg) //~ ERROR cannot borrow
- | ^^^ mutable borrow starts here in previous iteration of loop
+ | ------------^^^-
+ | | |
+ | | mutable borrow starts here in previous iteration of loop
+ | borrow later used here
error[E0499]: cannot borrow `*arg` as mutable more than once at a time
--> $DIR/mut-borrow-in-loop.rs:26:25
|
LL | (self.func)(arg) //~ ERROR cannot borrow
- | ^^^ mutable borrow starts here in previous iteration of loop
+ | ------------^^^-
+ | | |
+ | | mutable borrow starts here in previous iteration of loop
+ | borrow later used here
error[E0499]: cannot borrow `*arg` as mutable more than once at a time
--> $DIR/mut-borrow-in-loop.rs:33:25
|
LL | (self.func)(arg) //~ ERROR cannot borrow
- | ^^^ mutable borrow starts here in previous iteration of loop
+ | ------------^^^-
+ | | |
+ | | mutable borrow starts here in previous iteration of loop
+ | borrow later used here
error: aborting due to 3 previous errors
println!("{");
println!("{{}}");
println!("}");
+ let _ = format!("{_foo}", _foo = 6usize);
+ //~^ ERROR invalid format string: invalid argument name `_foo`
+ let _ = format!("{_}", _ = 6usize);
+ //~^ ERROR invalid format string: invalid argument name `_`
+ let _ = format!("{");
+ //~^ ERROR invalid format string: expected `'}'` but string was terminated
+ let _ = format!("}");
+ //~^ ERROR invalid format string: unmatched `}` found
+ let _ = format!("{\\}");
+ //~^ ERROR invalid format string: expected `'}'`, found `'\\'`
}
-
--> $DIR/format-string-error.rs:12:5
|
LL | println!("{");
- | ^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^ expected `'}'` in format string
|
= note: if you intended to print `{`, you can escape it using `{{`
= note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
--> $DIR/format-string-error.rs:14:5
|
LL | println!("}");
- | ^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^ unmatched `}` in format string
|
= note: if you intended to print `}`, you can escape it using `}}`
= note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
-error: aborting due to 2 previous errors
+error: invalid format string: invalid argument name `_foo`
+ --> $DIR/format-string-error.rs:15:23
+ |
+LL | let _ = format!("{_foo}", _foo = 6usize);
+ | ^^^^ invalid argument name in format string
+ |
+ = note: argument names cannot start with an underscore
+
+error: invalid format string: invalid argument name `_`
+ --> $DIR/format-string-error.rs:17:23
+ |
+LL | let _ = format!("{_}", _ = 6usize);
+ | ^ invalid argument name in format string
+ |
+ = note: argument names cannot start with an underscore
+
+error: invalid format string: expected `'}'` but string was terminated
+ --> $DIR/format-string-error.rs:19:23
+ |
+LL | let _ = format!("{");
+ | ^ expected `'}'` in format string
+ |
+ = note: if you intended to print `{`, you can escape it using `{{`
+
+error: invalid format string: unmatched `}` found
+ --> $DIR/format-string-error.rs:21:22
+ |
+LL | let _ = format!("}");
+ | ^ unmatched `}` in format string
+ |
+ = note: if you intended to print `}`, you can escape it using `}}`
+
+error: invalid format string: expected `'}'`, found `'/'`
+ --> $DIR/format-string-error.rs:23:23
+ |
+LL | let _ = format!("{/}");
+ | ^ expected `}` in format string
+
+error: aborting due to 7 previous errors
LL | match map.get() {
| --- immutable borrow occurs here
...
-LL | map.set(String::new()); // Just AST errors here
+LL | map.set(String::new()); // Ideally, this would not error.
| ^^^ mutable borrow occurs here
...
LL | }
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Ast)
- --> $DIR/get_default.rs:44:17
+ --> $DIR/get_default.rs:45:17
|
LL | match map.get() {
| --- immutable borrow occurs here
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Ast)
- --> $DIR/get_default.rs:50:17
+ --> $DIR/get_default.rs:51:17
|
LL | match map.get() {
| --- immutable borrow occurs here
...
-LL | map.set(String::new()); // Just AST errors here
+LL | map.set(String::new()); // Ideally, just AST would error here
| ^^^ mutable borrow occurs here
...
LL | }
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
- --> $DIR/get_default.rs:44:17
+ --> $DIR/get_default.rs:33:17
+ |
+LL | match map.get() {
+ | --- immutable borrow occurs here
+...
+LL | map.set(String::new()); // Ideally, this would not error.
+ | ^^^^^^^^^^^^^^^^^^^^^^ mutable borrow occurs here
+ |
+note: borrowed value must be valid for the anonymous lifetime #1 defined on the function body at 26:1...
+ --> $DIR/get_default.rs:26:1
+ |
+LL | / fn ok(map: &mut Map) -> &String {
+LL | | loop {
+LL | | match map.get() {
+LL | | Some(v) => {
+... |
+LL | | }
+LL | | }
+ | |_^
+
+error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
+ --> $DIR/get_default.rs:45:17
|
LL | match map.get() {
| --- immutable borrow occurs here
LL | return v;
| - borrow later used here
-error: aborting due to 4 previous errors
+error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
+ --> $DIR/get_default.rs:51:17
+ |
+LL | match map.get() {
+ | --- immutable borrow occurs here
+...
+LL | map.set(String::new()); // Ideally, just AST would error here
+ | ^^^^^^^^^^^^^^^^^^^^^^ mutable borrow occurs here
+ |
+note: borrowed value must be valid for the anonymous lifetime #1 defined on the function body at 41:1...
+ --> $DIR/get_default.rs:41:1
+ |
+LL | / fn err(map: &mut Map) -> &String {
+LL | | loop {
+LL | | match map.get() {
+LL | | Some(v) => {
+... |
+LL | | }
+LL | | }
+ | |_^
+
+error: aborting due to 6 previous errors
For more information about this error, try `rustc --explain E0502`.
return v;
}
None => {
- map.set(String::new()); // Just AST errors here
+ map.set(String::new()); // Ideally, this would not error.
//~^ ERROR borrowed as immutable (Ast)
+ //~| ERROR borrowed as immutable (Mir)
}
}
}
return v;
}
None => {
- map.set(String::new()); // Just AST errors here
+ map.set(String::new()); // Ideally, just AST would error here
//~^ ERROR borrowed as immutable (Ast)
+ //~| ERROR borrowed as immutable (Mir)
}
}
}
LL | match map.get() {
| --- immutable borrow occurs here
...
-LL | map.set(String::new()); // Just AST errors here
+LL | map.set(String::new()); // Ideally, this would not error.
| ^^^ mutable borrow occurs here
...
LL | }
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Ast)
- --> $DIR/get_default.rs:44:17
+ --> $DIR/get_default.rs:45:17
|
LL | match map.get() {
| --- immutable borrow occurs here
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Ast)
- --> $DIR/get_default.rs:50:17
+ --> $DIR/get_default.rs:51:17
|
LL | match map.get() {
| --- immutable borrow occurs here
...
-LL | map.set(String::new()); // Just AST errors here
+LL | map.set(String::new()); // Ideally, just AST would error here
| ^^^ mutable borrow occurs here
...
LL | }
| - immutable borrow ends here
error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
- --> $DIR/get_default.rs:44:17
+ --> $DIR/get_default.rs:33:17
+ |
+LL | match map.get() {
+ | --- immutable borrow occurs here
+...
+LL | map.set(String::new()); // Ideally, this would not error.
+ | ^^^ mutable borrow occurs here
+ |
+note: borrowed value must be valid for the anonymous lifetime #1 defined on the function body at 26:1...
+ --> $DIR/get_default.rs:26:1
+ |
+LL | / fn ok(map: &mut Map) -> &String {
+LL | | loop {
+LL | | match map.get() {
+LL | | Some(v) => {
+... |
+LL | | }
+LL | | }
+ | |_^
+
+error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
+ --> $DIR/get_default.rs:51:17
+ |
+LL | match map.get() {
+ | --- immutable borrow occurs here
+...
+LL | map.set(String::new()); // Ideally, just AST would error here
+ | ^^^ mutable borrow occurs here
+ |
+note: borrowed value must be valid for the anonymous lifetime #1 defined on the function body at 41:1...
+ --> $DIR/get_default.rs:41:1
+ |
+LL | / fn err(map: &mut Map) -> &String {
+LL | | loop {
+LL | | match map.get() {
+LL | | Some(v) => {
+... |
+LL | | }
+LL | | }
+ | |_^
+
+error[E0502]: cannot borrow `*map` as mutable because it is also borrowed as immutable (Mir)
+ --> $DIR/get_default.rs:45:17
|
LL | match map.get() {
| --- immutable borrow occurs here
LL | return v;
| - borrow later used here
-error: aborting due to 4 previous errors
+error: aborting due to 6 previous errors
For more information about this error, try `rustc --explain E0502`.
#[derive(Clone)]
pub struct Config {
+ /// Whether to overwrite stderr/stdout files instead of complaining about changes in output
+ pub bless: bool,
+
/// The library paths required for running the compiler
pub compile_lib_path: PathBuf,
"FLAGS",
)
.optflag("", "verbose", "run tests verbosely, showing all output")
+ .optflag(
+ "",
+ "bless",
+ "overwrite stderr/stdout files instead of complaining about a mismatch",
+ )
.optflag(
"",
"quiet",
let src_base = opt_path(matches, "src-base");
let run_ignored = matches.opt_present("ignored");
Config {
+ bless: matches.opt_present("bless"),
compile_lib_path: make_absolute(opt_path(matches, "compile-lib-path")),
run_lib_path: make_absolute(opt_path(matches, "run-lib-path")),
rustc_path: opt_path(matches, "rustc-path"),
}
if errors > 0 {
- println!("To update references, run this command from build directory:");
+ println!("To update references, rerun the tests and pass the `--bless` flag");
let relative_path_to_file = self.testpaths
.relative_dir
.join(self.testpaths.file.file_name().unwrap());
println!(
- "{}/update-references.sh '{}' '{}'",
- self.config.src_base.display(),
- self.config.build_base.display(),
- relative_path_to_file.display()
+ "To only update this specific test, also pass `--test-args {}`",
+ relative_path_to_file.display(),
);
self.fatal_proc_rec(
&format!("{} errors occurred comparing output.", errors),
return 0;
}
- if expected.is_empty() {
- println!("normalized {}:\n{}\n", kind, actual);
- } else {
- println!("diff of {}:\n", kind);
- let diff_results = make_diff(expected, actual, 3);
- for result in diff_results {
- let mut line_number = result.line_number;
- for line in result.lines {
- match line {
- DiffLine::Expected(e) => {
- println!("-\t{}", e);
- line_number += 1;
- }
- DiffLine::Context(c) => {
- println!("{}\t{}", line_number, c);
- line_number += 1;
- }
- DiffLine::Resulting(r) => {
- println!("+\t{}", r);
+ if !self.config.bless {
+ if expected.is_empty() {
+ println!("normalized {}:\n{}\n", kind, actual);
+ } else {
+ println!("diff of {}:\n", kind);
+ let diff_results = make_diff(expected, actual, 3);
+ for result in diff_results {
+ let mut line_number = result.line_number;
+ for line in result.lines {
+ match line {
+ DiffLine::Expected(e) => {
+ println!("-\t{}", e);
+ line_number += 1;
+ }
+ DiffLine::Context(c) => {
+ println!("{}\t{}", line_number, c);
+ line_number += 1;
+ }
+ DiffLine::Resulting(r) => {
+ println!("+\t{}", r);
+ }
}
}
+ println!("");
}
- println!("");
}
}
.with_extra_extension(mode)
.with_extra_extension(kind);
- match File::create(&output_file).and_then(|mut f| f.write_all(actual.as_bytes())) {
- Ok(()) => {}
- Err(e) => self.fatal(&format!(
- "failed to write {} to `{}`: {}",
+ let mut files = vec![output_file];
+ if self.config.bless {
+ files.push(expected_output_path(
+ self.testpaths,
+ self.revision,
+ &self.config.compare_mode,
kind,
- output_file.display(),
- e
- )),
+ ));
+ }
+
+ for output_file in &files {
+ if actual.is_empty() {
+ if let Err(e) = ::std::fs::remove_file(output_file) {
+ self.fatal(&format!(
+ "failed to delete `{}`: {}",
+ output_file.display(),
+ e,
+ ));
+ }
+ } else {
+ match File::create(&output_file).and_then(|mut f| f.write_all(actual.as_bytes())) {
+ Ok(()) => {}
+ Err(e) => self.fatal(&format!(
+ "failed to write {} to `{}`: {}",
+ kind,
+ output_file.display(),
+ e
+ )),
+ }
+ }
}
println!("\nThe actual {0} differed from the expected {0}.", kind);
- println!("Actual {} saved to {}", kind, output_file.display());
- 1
+ for output_file in files {
+ println!("Actual {} saved to {}", kind, output_file.display());
+ }
+ if self.config.bless {
+ 0
+ } else {
+ 1
+ }
}
fn create_stamp(&self) {
-Subproject commit 3e3df0485004bc1343bc8200b68c67ac7c479b28
+Subproject commit cf0609d0af0b734d4b9ee9dce6df66f946fc763f
-Subproject commit db8cb0b8d6942d42a322b1d36b2504977404f362
+Subproject commit bf2581bf7709b91c4431ba7074de910f72283e1f