git.lizzy.rs Git - rust.git/commitdiff
Auto merge of #41154 - bluss:slice-rfind, r=alexcrichton
author bors <bors@rust-lang.org>
Sun, 9 Apr 2017 03:30:09 +0000 (03:30 +0000)
committer bors <bors@rust-lang.org>
Sun, 9 Apr 2017 03:30:09 +0000 (03:30 +0000)
Implement .rfind() for slice iterators Iter and IterMut

Just like the forward case `find`, implement `rfind` explicitly for the slice iterators `Iter` and `IterMut`.
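
For readers less familiar with the API: `rfind` searches an iterator from the back and returns the first element, counted from the end, that satisfies the predicate. A minimal usage sketch of the behaviour these specialized implementations cover (assuming `DoubleEndedIterator::rfind` as it was later stabilized; the example is illustrative and not taken from the patch):

```rust
fn main() {
    let v = [1, 2, 3, 2, 1];

    // `rfind` scans from the back of the slice; a specialized implementation
    // can do this as a plain reverse loop instead of repeated `next_back` calls.
    assert_eq!(v.iter().rfind(|&&x| x == 2), Some(&2)); // matches the *last* 2
    assert_eq!(v.iter().rfind(|&&x| x == 9), None);

    // `IterMut` behaves the same way but yields mutable references.
    let mut w = [1, 2, 3];
    if let Some(x) = w.iter_mut().rfind(|x| **x % 2 == 1) {
        *x = 0; // rewrites the last odd element (the 3)
    }
    assert_eq!(w, [1, 2, 0]);
}
```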

34 files changed:
src/bootstrap/bootstrap.py
src/ci/init_repo.sh
src/doc/unstable-book/src/SUMMARY.md
src/doc/unstable-book/src/abi-msp430-interrupt.md
src/doc/unstable-book/src/abi-ptx.md
src/doc/unstable-book/src/compiler-barriers.md [new file with mode: 0644]
src/doc/unstable-book/src/compiler-builtins-lib.md
src/doc/unstable-book/src/compiler-builtins.md
src/libcore/sync/atomic.rs
src/libcore/tests/atomic.rs
src/librustc/hir/intravisit.rs
src/librustc/hir/lowering.rs
src/librustc/hir/mod.rs
src/librustc/hir/print.rs
src/librustc/ich/impls_hir.rs
src/librustc_borrowck/borrowck/mir/dataflow/mod.rs
src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
src/librustc_borrowck/borrowck/mir/mod.rs
src/librustc_mir/transform/qualify_consts.rs
src/librustc_resolve/lib.rs
src/librustc_typeck/astconv.rs
src/librustc_typeck/check/dropck.rs
src/librustdoc/clean/mod.rs
src/libsyntax/ast.rs
src/libsyntax/feature_gate.rs
src/libsyntax/fold.rs
src/libsyntax/parse/parser.rs
src/libsyntax/print/pprust.rs
src/libsyntax/visit.rs
src/test/compile-fail/const-match-pattern-arm.rs [new file with mode: 0644]
src/test/compile-fail/issue-35675.rs [new file with mode: 0644]
src/test/mir-opt/issue-41110.rs [new file with mode: 0644]
src/test/ui/span/issue-34264.rs [new file with mode: 0644]
src/test/ui/span/issue-34264.stderr [new file with mode: 0644]

index 526beb41aae5b6dc3a63e13173dd247a6c9058bd..2e33b4511949d208723bf5fe8858712a30b5d2ac 100644 (file)
@@ -472,10 +472,10 @@ class RustBuild(object):
             cputype = 'i686'
         elif cputype in {'xscale', 'arm'}:
             cputype = 'arm'
-        elif cputype in {'armv6l', 'armv7l', 'armv8l'}:
+        elif cputype == 'armv6l':
             cputype = 'arm'
             ostype += 'eabihf'
-        elif cputype == 'armv7l':
+        elif cputype in {'armv7l', 'armv8l'}:
             cputype = 'armv7'
             ostype += 'eabihf'
         elif cputype == 'aarch64':
index c235681cddd0c5dac224280a0d67c25c6599c23b..633b88dd2c42facd2f9b90fd55a0a7a7e0dc408a 100755 (executable)
@@ -41,8 +41,10 @@ if [ ! -f "$cache_valid_file" ]; then
     rm -rf "$CACHE_DIR"
     mkdir "$CACHE_DIR"
 else
+    set +o errexit
     stat_lines=$(cd "$cache_src_dir" && git status --porcelain | wc -l)
-    stat_ec=$(cd "$cache_src_dir" && git status >/dev/null 2>&1 && echo $?)
+    stat_ec=$(cd "$cache_src_dir" && git status >/dev/null 2>&1; echo $?)
+    set -o errexit
     if [ ! -d "$cache_src_dir/.git" -o $stat_lines != 0 -o $stat_ec != 0 ]; then
         # Something is badly wrong - the cache valid file is here, but something
         # about the git repo is fishy. Nuke it all, just in case
index 20812de524add29aa089e6bdd28dabcc08b5771a..93ce911ac6cc72169cb25bb45163beda497fde2b 100644 (file)
@@ -37,6 +37,7 @@
 - [collections](collections.md)
 - [collections_range](collections-range.md)
 - [command_envs](command-envs.md)
+- [compiler_barriers](compiler-barriers.md)
 - [compiler_builtins](compiler-builtins.md)
 - [compiler_builtins_lib](compiler-builtins-lib.md)
 - [concat_idents](concat-idents.md)
index 9b2c7f298979d016ec2927f099185b4d54fadbc5..b10bc41cb1439d08b46195030212d962535a96f3 100644 (file)
@@ -5,3 +5,38 @@ The tracking issue for this feature is: [#38487]
 [#38487]: https://github.com/rust-lang/rust/issues/38487
 
 ------------------------
+
+In the MSP430 architecture, interrupt handlers have a special calling
+convention. You can use the `"msp430-interrupt"` ABI to make the compiler apply
+the right calling convention to the interrupt handlers you define.
+
+<!-- NOTE(ignore) this example is specific to the msp430 target -->
+
+``` rust,ignore
+#![feature(abi_msp430_interrupt)]
+#![no_std]
+
+// Place the interrupt handler at the appropriate memory address
+// (Alternatively, you can use `#[used]` and remove `pub` and `#[no_mangle]`)
+#[link_section = "__interrupt_vector_10"]
+#[no_mangle]
+pub static TIM0_VECTOR: extern "msp430-interrupt" fn() = tim0;
+
+// The interrupt handler
+extern "msp430-interrupt" fn tim0() {
+    // ..
+}
+```
+
+``` text
+$ msp430-elf-objdump -CD ./target/msp430/release/app
+Disassembly of section __interrupt_vector_10:
+
+0000fff2 <TIM0_VECTOR>:
+    fff2:       00 c0           interrupt service routine at 0xc000
+
+Disassembly of section .text:
+
+0000c000 <int::tim0>:
+    c000:       00 13           reti
+```
index 9c1b8868aceb4bd5d208cf14691f4d781abd4874..0ded3ceeaef2ce9ea0457dc563aa53a0014b6737 100644 (file)
@@ -1,5 +1,60 @@
 # `abi_ptx`
 
-The tracking issue for this feature is: None.
+The tracking issue for this feature is: [#38788]
+
+[#38788]: https://github.com/rust-lang/rust/issues/38788
 
 ------------------------
+
+When emitting PTX code, all vanilla Rust functions (`fn`) get translated to
+"device" functions. These functions are *not* callable from the host via the
+CUDA API, so a crate containing only device functions is of limited use on its own.
+
+"Global" functions, on the other hand, *can* be called by the host; you can think
+of them as the real public API of your crate. To produce a global function, use
+the `"ptx-kernel"` ABI.
+
+<!-- NOTE(ignore) this example is specific to the nvptx targets -->
+
+``` rust,ignore
+#![feature(abi_ptx)]
+#![no_std]
+
+pub unsafe extern "ptx-kernel" fn global_function() {
+    device_function();
+}
+
+pub fn device_function() {
+    // ..
+}
+```
+
+``` text
+$ xargo rustc --target nvptx64-nvidia-cuda --release -- --emit=asm
+
+$ cat $(find -name '*.s')
+//
+// Generated by LLVM NVPTX Back-End
+//
+
+.version 3.2
+.target sm_20
+.address_size 64
+
+        // .globl       _ZN6kernel15global_function17h46111ebe6516b382E
+
+.visible .entry _ZN6kernel15global_function17h46111ebe6516b382E()
+{
+
+
+        ret;
+}
+
+        // .globl       _ZN6kernel15device_function17hd6a0e4993bbf3f78E
+.visible .func _ZN6kernel15device_function17hd6a0e4993bbf3f78E()
+{
+
+
+        ret;
+}
+```
diff --git a/src/doc/unstable-book/src/compiler-barriers.md b/src/doc/unstable-book/src/compiler-barriers.md
new file mode 100644 (file)
index 0000000..827447f
--- /dev/null
@@ -0,0 +1,106 @@
+# `compiler_barriers`
+
+The tracking issue for this feature is: [#41091]
+
+[#41091]: https://github.com/rust-lang/rust/issues/41091
+
+------------------------
+
+The `compiler_barriers` feature exposes the `compiler_barrier` function
+in `std::sync::atomic`. This function is conceptually similar to C++'s
+`atomic_signal_fence`, which can currently only be accessed in nightly
+Rust using the `atomic_singlethreadfence_*` intrinsic functions in
+`core`, or through the mostly equivalent literal assembly:
+
+```rust
+#![feature(asm)]
+unsafe { asm!("" ::: "memory" : "volatile") };
+```
+
+A `compiler_barrier` restricts the kinds of memory re-ordering the
+compiler is allowed to do. Specifically, depending on the given ordering
+semantics, the compiler may be disallowed from moving reads or writes
+from before or after the call to the other side of the call to
+`compiler_barrier`. Note that it does **not** prevent the *hardware*
+from doing such re-ordering. This is not a problem in a single-threaded
+execution context, but when other threads may modify memory at the same
+time, stronger synchronization primitives are required.
+
+## Examples
+
+`compiler_barrier` is generally only useful for preventing a thread from
+racing *with itself*. That is, it matters when a given thread is executing
+one piece of code, is then interrupted, and starts executing code elsewhere
+(while still in the same thread, and conceptually still on the same
+core). In traditional programs, this can only occur when a signal
+handler is registered. In lower-level code, such situations can also
+arise when handling interrupts, when implementing green threads with
+pre-emption, and so on.
+
+To give a straightforward example of when a `compiler_barrier` is
+necessary, consider the following code:
+
+```rust
+# use std::sync::atomic::{AtomicBool, AtomicUsize};
+# use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
+# use std::sync::atomic::Ordering;
+static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
+
+fn main() {
+    IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
+    IS_READY.store(true, Ordering::Relaxed);
+}
+
+fn signal_handler() {
+    if IS_READY.load(Ordering::Relaxed) {
+        assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
+    }
+}
+```
+
+The way it is currently written, the `assert_eq!` is *not* guaranteed to
+succeed, despite everything happening in a single thread. To see why,
+remember that the compiler is free to swap the stores to
+`IMPORTANT_VARIABLE` and `IS_READY`, since they are both
+`Ordering::Relaxed`. If it does, and the signal handler is invoked right
+after `IS_READY` is updated, then the signal handler will see
+`IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
+
+Using a `compiler_barrier`, we can remedy this situation:
+
+```rust
+#![feature(compiler_barriers)]
+# use std::sync::atomic::{AtomicBool, AtomicUsize};
+# use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
+# use std::sync::atomic::Ordering;
+use std::sync::atomic::compiler_barrier;
+
+static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
+
+fn main() {
+    IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
+    // prevent earlier writes from being moved beyond this point
+    compiler_barrier(Ordering::Release);
+    IS_READY.store(true, Ordering::Relaxed);
+}
+
+fn signal_handler() {
+    if IS_READY.load(Ordering::Relaxed) {
+        assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
+    }
+}
+```
+
+A deeper discussion of compiler barriers with various re-ordering
+semantics (such as `Ordering::SeqCst`) is beyond the scope of this text.
+Curious readers are encouraged to read the Linux kernel's discussion of
+[memory barriers][1], the C++ references on [`std::memory_order`][2] and
+[`atomic_signal_fence`][3], and [this StackOverflow answer][4] for
+further details.
+
+[1]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
+[2]: http://en.cppreference.com/w/cpp/atomic/memory_order
+[3]: http://www.cplusplus.com/reference/atomic/atomic_signal_fence/
+[4]: http://stackoverflow.com/a/18454971/472927
index 8986b968ca6c4368b5c384ff011744dcc0f9b2ae..5da8968fd0ce22724a09ae8be37a36ab8053671c 100644 (file)
@@ -1,5 +1,35 @@
 # `compiler_builtins_lib`
 
-This feature is internal to the Rust compiler and is not intended for general use.
+The tracking issue for this feature is: None.
 
 ------------------------
+
+This feature is required to link to the `compiler_builtins` crate, which contains
+"compiler intrinsics". Compiler intrinsics are software implementations of basic
+operations like multiplication of `u64`s. These intrinsics are only required on
+platforms where these operations don't directly map to a hardware instruction.
+
+You should never need to explicitly link to the `compiler_builtins` crate when
+building "std" programs, since `compiler_builtins` is already in the dependency
+graph of `std`. But you may need it when building `no_std` **binary** crates. If
+you get a *linker* error like:
+
+``` text
+$PWD/src/main.rs:11: undefined reference to `__aeabi_lmul'
+$PWD/src/main.rs:11: undefined reference to `__aeabi_uldivmod'
+```
+
+That means you need to link to this crate.
+
+When you link to this crate, make sure it only appears once in your crate
+dependency graph. Also, it doesn't matter where in the dependency graph you
+place the `compiler_builtins` crate.
+
+<!-- NOTE(ignore) doctests don't support `no_std` binaries -->
+
+``` rust,ignore
+#![feature(compiler_builtins_lib)]
+#![no_std]
+
+extern crate compiler_builtins;
+```
index 3ec3cba257a99f10b7a2b541a63783c180741dae..52fac575b6e8631cea0c182325979b84d63b18af 100644 (file)
@@ -1,6 +1,5 @@
 # `compiler_builtins`
 
-The tracking issue for this feature is: None.
+This feature is internal to the Rust compiler and is not intended for general use.
 
 ------------------------
-
index 2e1058bfc3413bd4efa59a5815c3fc1fae711541..0c70524ead246beee3a85bbe16754f9612f34329 100644 (file)
@@ -539,17 +539,16 @@ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
         // We can't use atomic_nand here because it can result in a bool with
         // an invalid value. This happens because the atomic operation is done
         // with an 8-bit integer internally, which would set the upper 7 bits.
-        // So we just use a compare-exchange loop instead, which is what the
-        // intrinsic actually expands to anyways on many platforms.
-        let mut old = self.load(Relaxed);
-        loop {
-            let new = !(old && val);
-            match self.compare_exchange_weak(old, new, order, Relaxed) {
-                Ok(_) => break,
-                Err(x) => old = x,
-            }
+        // So we just use fetch_xor or swap instead.
+        if val {
+            // !(x & true) == !x
+            // We must invert the bool.
+            self.fetch_xor(true, order)
+        } else {
+            // !(x & false) == true
+            // We must set the bool to true.
+            self.swap(true, order)
         }
-        old
     }
 
     /// Logical "or" with a boolean value.
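
The hunk above replaces the compare-exchange loop in `AtomicBool::fetch_nand` with a `fetch_xor`/`swap` shortcut. As a non-atomic sanity check of the identities quoted in the comments (`!(x & true) == !x` and `!(x & false) == true`), here is a small illustrative sketch (not part of the patch; plain functions stand in for the atomic operations):

```rust
// Reference semantics of `fetch_nand`: store `!(old & val)` and return `old`.
fn nand_reference(cell: &mut bool, val: bool) -> bool {
    let old = *cell;
    *cell = !(old && val);
    old
}

// The shortcut used in the patch: xor-with-`true` inverts, swap-with-`true` sets.
fn nand_shortcut(cell: &mut bool, val: bool) -> bool {
    let old = *cell;
    if val {
        *cell = old ^ true; // !(old & true) == !old
    } else {
        *cell = true;       // !(old & false) == true
    }
    old
}

fn main() {
    // Exhaustively check that both definitions agree for every input.
    for &initial in &[false, true] {
        for &val in &[false, true] {
            let (mut a, mut b) = (initial, initial);
            assert_eq!(nand_reference(&mut a, val), nand_shortcut(&mut b, val));
            assert_eq!(a, b);
        }
    }
}
```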
@@ -1592,6 +1591,47 @@ pub fn fence(order: Ordering) {
 }
 
 
+/// A compiler memory barrier.
+///
+/// `compiler_barrier` does not emit any machine code, but prevents the compiler from re-ordering
+/// memory operations across this point. Which reorderings are disallowed is dictated by the given
+/// [`Ordering`]. Note that `compiler_barrier` does *not* introduce inter-thread memory
+/// synchronization; for that, a [`fence`] is needed.
+///
+/// The re-orderings prevented by the different ordering semantics are:
+///
+///  - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
+///  - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
+///  - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
+///  - with [`AcqRel`], both of the above rules are enforced.
+///
+/// # Panics
+///
+/// Panics if `order` is [`Relaxed`].
+///
+/// [`fence`]: fn.fence.html
+/// [`Ordering`]: enum.Ordering.html
+/// [`Acquire`]: enum.Ordering.html#variant.Acquire
+/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
+/// [`Release`]: enum.Ordering.html#variant.Release
+/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
+/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
+#[inline]
+#[unstable(feature = "compiler_barriers", issue = "41091")]
+pub fn compiler_barrier(order: Ordering) {
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_singlethreadfence_acq(),
+            Release => intrinsics::atomic_singlethreadfence_rel(),
+            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
+            SeqCst => intrinsics::atomic_singlethreadfence(),
+            Relaxed => panic!("there is no such thing as a relaxed barrier"),
+            __Nonexhaustive => panic!("invalid memory ordering"),
+        }
+    }
+}
+
+
 #[cfg(target_has_atomic = "8")]
 #[stable(feature = "atomic_debug", since = "1.3.0")]
 impl fmt::Debug for AtomicBool {
index b6bb5fddf4a4bdc4fed02347bfc312cc53636f0e..9babe24a985634e14975237f000d9edc400fba7e 100644 (file)
@@ -24,10 +24,23 @@ fn bool_() {
 #[test]
 fn bool_and() {
     let a = AtomicBool::new(true);
-    assert_eq!(a.fetch_and(false, SeqCst),true);
+    assert_eq!(a.fetch_and(false, SeqCst), true);
     assert_eq!(a.load(SeqCst),false);
 }
 
+#[test]
+fn bool_nand() {
+    let a = AtomicBool::new(false);
+    assert_eq!(a.fetch_nand(false, SeqCst), false);
+    assert_eq!(a.load(SeqCst), true);
+    assert_eq!(a.fetch_nand(false, SeqCst), true);
+    assert_eq!(a.load(SeqCst), true);
+    assert_eq!(a.fetch_nand(true, SeqCst), true);
+    assert_eq!(a.load(SeqCst), false);
+    assert_eq!(a.fetch_nand(true, SeqCst), false);
+    assert_eq!(a.load(SeqCst), true);
+}
+
 #[test]
 fn uint_and() {
     let x = AtomicUsize::new(0xf731);
index c7ad143c94979762f7ac060dc2be4ba9e91a865a..2c8b145f126cd17ffb05ddd7c805dc4dc9c21132 100644 (file)
@@ -578,7 +578,7 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
         TyTypeof(expression) => {
             visitor.visit_nested_body(expression)
         }
-        TyInfer => {}
+        TyInfer | TyErr => {}
     }
 }
 
index 3f4390536b0428298c1bf74b83c451de74b15ce7..30fec50d4eb6fef66ea3cc62a28353977f6bf9be 100644 (file)
@@ -555,6 +555,7 @@ fn lower_ty_binding(&mut self, b: &TypeBinding) -> hir::TypeBinding {
     fn lower_ty(&mut self, t: &Ty) -> P<hir::Ty> {
         let kind = match t.node {
             TyKind::Infer => hir::TyInfer,
+            TyKind::Err => hir::TyErr,
             TyKind::Slice(ref ty) => hir::TySlice(self.lower_ty(ty)),
             TyKind::Ptr(ref mt) => hir::TyPtr(self.lower_mt(mt)),
             TyKind::Rptr(ref region, ref mt) => {
index d5000ac9c18669a3940bbea0a4629eaa6645d614..0da405d1821d360d8d804ae6463a7f42af706e86 100644 (file)
@@ -1351,6 +1351,8 @@ pub enum Ty_ {
     /// TyInfer means the type should be inferred instead of it having been
     /// specified. This can appear anywhere in a type.
     TyInfer,
+    /// Placeholder for a type that has failed to be defined.
+    TyErr,
 }
 
 #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
index 04a65fd5e3aa47c0a748993578135da24124bf5b..4a5a35aa82ca0ef5799733d4021f2c4d86e49c50 100644 (file)
@@ -450,6 +450,9 @@ pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> {
             hir::TyInfer => {
                 word(&mut self.s, "_")?;
             }
+            hir::TyErr => {
+                word(&mut self.s, "?")?;
+            }
         }
         self.end()
     }
index fb18f50027e29602d2a9ea630f6619e3767a1da8..9cf8a0693d3636383133b2cbf0198e7bdb6141e0 100644 (file)
@@ -230,6 +230,7 @@ fn hash_stable<W: StableHasherResult>(&self,
             hir::TyTraitObject(..) |
             hir::TyImplTrait(..)   |
             hir::TyTypeof(..)      |
+            hir::TyErr             |
             hir::TyInfer           => {
                 NodeIdHashingMode::Ignore
             }
@@ -282,6 +283,7 @@ fn hash_stable<W: StableHasherResult>(&self,
     TyTraitObject(trait_refs, lifetime),
     TyImplTrait(bounds),
     TyTypeof(body_id),
+    TyErr,
     TyInfer
 });
 
index 8b246105f61693b147688622d62203ddfa33653b..f0f082a2561cca6f3814dd1acaf46b2ec4828805 100644 (file)
@@ -181,6 +181,7 @@ pub struct DataflowAnalysis<'a, 'tcx: 'a, O>
     where O: BitDenotation
 {
     flow_state: DataflowState<O>,
+    dead_unwinds: &'a IdxSet<mir::BasicBlock>,
     mir: &'a Mir<'tcx>,
 }
 
@@ -377,6 +378,7 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D>
 {
     pub fn new(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
                mir: &'a Mir<'tcx>,
+               dead_unwinds: &'a IdxSet<mir::BasicBlock>,
                denotation: D) -> Self {
         let bits_per_block = denotation.bits_per_block();
         let usize_bits = mem::size_of::<usize>() * 8;
@@ -397,6 +399,7 @@ pub fn new(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
 
         DataflowAnalysis {
             mir: mir,
+            dead_unwinds: dead_unwinds,
             flow_state: DataflowState {
                 sets: AllSets {
                     bits_per_block: bits_per_block,
@@ -452,7 +455,9 @@ fn propagate_bits_into_graph_successors_of(
                 ref target, value: _, location: _, unwind: Some(ref unwind)
             } => {
                 self.propagate_bits_into_entry_set_for(in_out, changed, target);
-                self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+                if !self.dead_unwinds.contains(&bb) {
+                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+                }
             }
             mir::TerminatorKind::SwitchInt { ref targets, .. } => {
                 for target in targets {
@@ -461,7 +466,9 @@ fn propagate_bits_into_graph_successors_of(
             }
             mir::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => {
                 if let Some(ref unwind) = *cleanup {
-                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+                    if !self.dead_unwinds.contains(&bb) {
+                        self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
+                    }
                 }
                 if let Some((ref dest_lval, ref dest_bb)) = *destination {
                     // N.B.: This must be done *last*, after all other
index 88ec86cc95d614dfe461a30620d73dc6c5c9a064..713e656666271a97c32cae318612ddb3af349bdf 100644 (file)
@@ -11,8 +11,8 @@
 use super::gather_moves::{HasMoveData, MoveData, MovePathIndex, LookupResult};
 use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
 use super::dataflow::{DataflowResults};
-use super::{drop_flag_effects_for_location, on_all_children_bits};
-use super::on_lookup_result_bits;
+use super::{on_all_children_bits, on_all_drop_children_bits};
+use super::{drop_flag_effects_for_location, on_lookup_result_bits};
 use super::MoveDataParamEnv;
 use rustc::ty::{self, TyCtxt};
 use rustc::mir::*;
@@ -24,6 +24,7 @@
 use rustc_mir::util::patch::MirPatch;
 use rustc_mir::util::elaborate_drops::{DropFlagState, elaborate_drop};
 use rustc_mir::util::elaborate_drops::{DropElaborator, DropStyle, DropFlagMode};
+use syntax::ast;
 use syntax_pos::Span;
 
 use std::fmt;
@@ -49,12 +50,13 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
                 move_data: move_data,
                 param_env: param_env
             };
+            let dead_unwinds = find_dead_unwinds(tcx, mir, id, &env);
             let flow_inits =
-                super::do_dataflow(tcx, mir, id, &[],
+                super::do_dataflow(tcx, mir, id, &[], &dead_unwinds,
                                    MaybeInitializedLvals::new(tcx, mir, &env),
                                    |bd, p| &bd.move_data().move_paths[p]);
             let flow_uninits =
-                super::do_dataflow(tcx, mir, id, &[],
+                super::do_dataflow(tcx, mir, id, &[], &dead_unwinds,
                                    MaybeUninitializedLvals::new(tcx, mir, &env),
                                    |bd, p| &bd.move_data().move_paths[p]);
 
@@ -74,6 +76,67 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
 
 impl Pass for ElaborateDrops {}
 
+/// Return the set of basic blocks whose unwind edges are known
+/// to not be reachable, because they are `drop` terminators
+/// that can't drop anything.
+fn find_dead_unwinds<'a, 'tcx>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    mir: &Mir<'tcx>,
+    id: ast::NodeId,
+    env: &MoveDataParamEnv<'tcx>)
+    -> IdxSetBuf<BasicBlock>
+{
+    debug!("find_dead_unwinds({:?})", mir.span);
+    // We only need to do this pass once, because unwind edges can only
+    // reach cleanup blocks, which can't have unwind edges themselves.
+    let mut dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+    let flow_inits =
+        super::do_dataflow(tcx, mir, id, &[], &dead_unwinds,
+                           MaybeInitializedLvals::new(tcx, mir, &env),
+                           |bd, p| &bd.move_data().move_paths[p]);
+    for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
+        match bb_data.terminator().kind {
+            TerminatorKind::Drop { ref location, unwind: Some(_), .. } |
+            TerminatorKind::DropAndReplace { ref location, unwind: Some(_), .. } => {
+                let mut init_data = InitializationData {
+                    live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(),
+                    dead: IdxSetBuf::new_empty(env.move_data.move_paths.len()),
+                };
+                debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}",
+                       bb, bb_data, init_data.live);
+                for stmt in 0..bb_data.statements.len() {
+                    let loc = Location { block: bb, statement_index: stmt };
+                    init_data.apply_location(tcx, mir, env, loc);
+                }
+
+                let path = match env.move_data.rev_lookup.find(location) {
+                    LookupResult::Exact(e) => e,
+                    LookupResult::Parent(..) => {
+                        debug!("find_dead_unwinds: has parent; skipping");
+                        continue
+                    }
+                };
+
+                debug!("find_dead_unwinds @ {:?}: path({:?})={:?}", bb, location, path);
+
+                let mut maybe_live = false;
+                on_all_drop_children_bits(tcx, mir, &env, path, |child| {
+                    let (child_maybe_live, _) = init_data.state(child);
+                    maybe_live |= child_maybe_live;
+                });
+
+                debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
+                if !maybe_live {
+                    dead_unwinds.add(&bb);
+                }
+            }
+            _ => {}
+        }
+    }
+
+    dead_unwinds
+}
+
 struct InitializationData {
     live: IdxSetBuf<MovePathIndex>,
     dead: IdxSetBuf<MovePathIndex>
@@ -144,17 +207,14 @@ fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
                 let mut some_live = false;
                 let mut some_dead = false;
                 let mut children_count = 0;
-                on_all_children_bits(
-                    self.tcx(), self.mir(), self.ctxt.move_data(),
-                    path, |child| {
-                        if self.ctxt.path_needs_drop(child) {
-                            let (live, dead) = self.init_data.state(child);
-                            debug!("elaborate_drop: state({:?}) = {:?}",
-                                   child, (live, dead));
-                            some_live |= live;
-                            some_dead |= dead;
-                            children_count += 1;
-                        }
+                on_all_drop_children_bits(
+                    self.tcx(), self.mir(), self.ctxt.env, path, |child| {
+                        let (live, dead) = self.init_data.state(child);
+                        debug!("elaborate_drop: state({:?}) = {:?}",
+                               child, (live, dead));
+                        some_live |= live;
+                        some_dead |= dead;
+                        children_count += 1;
                     });
                 ((some_live, some_dead), children_count != 1)
             }
@@ -276,15 +336,6 @@ fn elaborate(mut self) -> MirPatch<'tcx>
         self.patch
     }
 
-    fn path_needs_drop(&self, path: MovePathIndex) -> bool
-    {
-        let lvalue = &self.move_data().move_paths[path].lvalue;
-        let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
-        debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty);
-
-        self.tcx.type_needs_drop_given_env(ty, self.param_env())
-    }
-
     fn collect_drop_flags(&mut self)
     {
         for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
@@ -318,14 +369,12 @@ fn collect_drop_flags(&mut self)
                 }
             };
 
-            on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| {
-                if self.path_needs_drop(child) {
-                    let (maybe_live, maybe_dead) = init_data.state(child);
-                    debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
-                           child, location, path, (maybe_live, maybe_dead));
-                    if maybe_live && maybe_dead {
-                        self.create_drop_flag(child)
-                    }
+            on_all_drop_children_bits(self.tcx, self.mir, self.env, path, |child| {
+                let (maybe_live, maybe_dead) = init_data.state(child);
+                debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                       child, location, path, (maybe_live, maybe_dead));
+                if maybe_live && maybe_dead {
+                    self.create_drop_flag(child)
                 }
             });
         }
index 9237bb31f6bd7f62c267550d29723c46d69bcf13..dc01cbe5e7605eb3a4fd4a9e19e269d25fe531a8 100644 (file)
@@ -17,6 +17,7 @@
 use rustc::session::Session;
 use rustc::ty::{self, TyCtxt};
 use rustc_mir::util::elaborate_drops::DropFlagState;
+use rustc_data_structures::indexed_set::{IdxSet, IdxSetBuf};
 
 mod abs_domain;
 pub mod elaborate_drops;
@@ -64,14 +65,18 @@ pub fn borrowck_mir(bcx: &mut BorrowckCtxt,
     let param_env = ty::ParameterEnvironment::for_item(tcx, id);
     let move_data = MoveData::gather_moves(mir, tcx, &param_env);
     let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env };
+    let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
     let flow_inits =
-        do_dataflow(tcx, mir, id, attributes, MaybeInitializedLvals::new(tcx, mir, &mdpe),
+        do_dataflow(tcx, mir, id, attributes, &dead_unwinds,
+                    MaybeInitializedLvals::new(tcx, mir, &mdpe),
                     |bd, i| &bd.move_data().move_paths[i]);
     let flow_uninits =
-        do_dataflow(tcx, mir, id, attributes, MaybeUninitializedLvals::new(tcx, mir, &mdpe),
+        do_dataflow(tcx, mir, id, attributes, &dead_unwinds,
+                    MaybeUninitializedLvals::new(tcx, mir, &mdpe),
                     |bd, i| &bd.move_data().move_paths[i]);
     let flow_def_inits =
-        do_dataflow(tcx, mir, id, attributes, DefinitelyInitializedLvals::new(tcx, mir, &mdpe),
+        do_dataflow(tcx, mir, id, attributes, &dead_unwinds,
+                    DefinitelyInitializedLvals::new(tcx, mir, &mdpe),
                     |bd, i| &bd.move_data().move_paths[i]);
 
     if has_rustc_mir_with(attributes, "rustc_peek_maybe_init").is_some() {
@@ -108,6 +113,7 @@ fn do_dataflow<'a, 'tcx, BD, P>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 mir: &Mir<'tcx>,
                                 node_id: ast::NodeId,
                                 attributes: &[ast::Attribute],
+                                dead_unwinds: &IdxSet<BasicBlock>,
                                 bd: BD,
                                 p: P)
                                 -> DataflowResults<BD>
@@ -137,7 +143,7 @@ fn do_dataflow<'a, 'tcx, BD, P>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         node_id: node_id,
         print_preflow_to: print_preflow_to,
         print_postflow_to: print_postflow_to,
-        flow_state: DataflowAnalysis::new(tcx, mir, bd),
+        flow_state: DataflowAnalysis::new(tcx, mir, dead_unwinds, bd),
     };
 
     mbcx.dataflow(p);
@@ -303,6 +309,27 @@ fn on_all_children_bits<'a, 'tcx, F>(
     on_all_children_bits(tcx, mir, move_data, move_path_index, &mut each_child);
 }
 
+fn on_all_drop_children_bits<'a, 'tcx, F>(
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    mir: &Mir<'tcx>,
+    ctxt: &MoveDataParamEnv<'tcx>,
+    path: MovePathIndex,
+    mut each_child: F)
+    where F: FnMut(MovePathIndex)
+{
+    on_all_children_bits(tcx, mir, &ctxt.move_data, path, |child| {
+        let lvalue = &ctxt.move_data.move_paths[path].lvalue;
+        let ty = lvalue.ty(mir, tcx).to_ty(tcx);
+        debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, lvalue, ty);
+
+        if tcx.type_needs_drop_given_env(ty, &ctxt.param_env) {
+            each_child(child);
+        } else {
+            debug!("on_all_drop_children_bits - skipping")
+        }
+    })
+}
+
 fn drop_flag_effects_for_function_entry<'a, 'tcx, F>(
     tcx: TyCtxt<'a, 'tcx, 'tcx>,
     mir: &Mir<'tcx>,
index 9d236bd013c43114db3ef0f13921c839fd933e78..8eabe92fb98c005a7a4beec26e456b02e9791f47 100644 (file)
@@ -603,7 +603,8 @@ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
             Rvalue::Cast(CastKind::ReifyFnPointer, ..) |
             Rvalue::Cast(CastKind::UnsafeFnPointer, ..) |
             Rvalue::Cast(CastKind::ClosureFnPointer, ..) |
-            Rvalue::Cast(CastKind::Unsize, ..) => {}
+            Rvalue::Cast(CastKind::Unsize, ..) |
+            Rvalue::Discriminant(..) => {}
 
             Rvalue::Len(_) => {
                 // Static lvalues in consts would have errored already,
@@ -721,14 +722,6 @@ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
                 }
             }
 
-            Rvalue::Discriminant(..) => {
-                // FIXME discriminant
-                self.add(Qualif::NOT_CONST);
-                if self.mode != Mode::Fn {
-                    bug!("implement discriminant const qualify");
-                }
-            }
-
             Rvalue::Box(_) => {
                 self.add(Qualif::NOT_CONST);
                 if self.mode != Mode::Fn {
index d9900340a2e9f35613f67e9cae28e062298b703a..c94f63329d1ff1ec3c104835d630fac5a8d06722 100644 (file)
@@ -2222,6 +2222,7 @@ fn smart_resolve_path_fragment(&mut self,
                                    -> PathResolution {
         let ns = source.namespace();
         let is_expected = &|def| source.is_expected(def);
+        let is_enum_variant = &|def| if let Def::Variant(..) = def { true } else { false };
 
         // Base error is amended with one short label and possibly some longer helps/notes.
         let report_errors = |this: &mut Self, def: Option<Def>| {
@@ -2272,6 +2273,21 @@ fn smart_resolve_path_fragment(&mut self,
             if !candidates.is_empty() {
                 // Report import candidates as help and proceed searching for labels.
                 show_candidates(&mut err, &candidates, def.is_some());
+            } else if is_expected(Def::Enum(DefId::local(CRATE_DEF_INDEX))) {
+                let enum_candidates = this.lookup_import_candidates(name, ns, is_enum_variant);
+                let mut enum_candidates = enum_candidates.iter()
+                    .map(|suggestion| import_candidate_to_paths(&suggestion)).collect::<Vec<_>>();
+                enum_candidates.sort();
+                for (sp, variant_path, enum_path) in enum_candidates {
+                    let msg = format!("there is an enum variant `{}`, did you mean to use `{}`?",
+                                      variant_path,
+                                      enum_path);
+                    if sp == DUMMY_SP {
+                        err.help(&msg);
+                    } else {
+                        err.span_help(sp, &msg);
+                    }
+                }
             }
             if path.len() == 1 && this.self_type_is_available() {
                 if let Some(candidate) = this.lookup_assoc_candidate(name, ns, is_expected) {
@@ -3424,6 +3440,22 @@ fn path_names_to_string(path: &Path) -> String {
     names_to_string(&path.segments.iter().map(|seg| seg.identifier).collect::<Vec<_>>())
 }
 
+/// Get the path for an enum and the variant from an `ImportSuggestion` for an enum variant.
+fn import_candidate_to_paths(suggestion: &ImportSuggestion) -> (Span, String, String) {
+    let variant_path = &suggestion.path;
+    let variant_path_string = path_names_to_string(variant_path);
+
+    let path_len = suggestion.path.segments.len();
+    let enum_path = ast::Path {
+        span: suggestion.path.span,
+        segments: suggestion.path.segments[0..path_len - 1].to_vec(),
+    };
+    let enum_path_string = path_names_to_string(&enum_path);
+
+    (suggestion.path.span, variant_path_string, enum_path_string)
+}
+
+
 /// When an entity with a given name is not available in scope, we search for
 /// entities with that name in all crates. This method allows outputting the
 /// results of this search in a programmer-friendly way
index 923ec05c22b77d0410952f3349460e15f4f53331..66c4a81a5c0f2f582975ddc130721dfadf7cb614 100644 (file)
@@ -1229,6 +1229,9 @@ pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
                 // handled specially and will not descend into this routine.
                 self.ty_infer(ast_ty.span)
             }
+            hir::TyErr => {
+                tcx.types.err
+            }
         };
 
         cache.borrow_mut().insert(ast_ty.id, result_ty);
index 90d2a15cf08637f4b94ae961aaeed13cdde50fb6..9f41373dab1b74ec2cd3b0ae858567818a17cceb 100644 (file)
@@ -278,9 +278,12 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
     debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
            typ, scope);
 
-    let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| {
-        span_bug!(span, "no enclosing scope found for scope: {:?}", scope)
-    });
+
+    let parent_scope = match rcx.tcx.region_maps.opt_encl_scope(scope) {
+        Some(parent_scope) => parent_scope,
+        // If no enclosing scope, then it must be the root scope which cannot be outlived.
+        None => return
+    };
 
     let result = iterate_over_potentially_unsafe_regions_in_type(
         &mut DropckContext {
index f3ea6c4467c401c396bccf1733a2494ea1040efa..ac72d7d29a24c4813839cd5b7185544ee269b698 100644 (file)
@@ -1805,7 +1805,7 @@ fn clean(&self, cx: &DocContext) -> Type {
             }
             TyBareFn(ref barefn) => BareFunction(box barefn.clean(cx)),
             TyImplTrait(ref bounds) => ImplTrait(bounds.clean(cx)),
-            TyInfer => Infer,
+            TyInfer | TyErr => Infer,
             TyTypeof(..) => panic!("Unimplemented type {:?}", self.node),
         }
     }
index 9eb86aa006d171f4ca0af868bdb13bd66921a237..c6a3e8a2dedc4653985b20276673a99c3fa658b9 100644 (file)
@@ -1361,6 +1361,8 @@ pub enum TyKind {
     ImplicitSelf,
     // A macro in the type position.
     Mac(Mac),
+    /// Placeholder for a kind that has failed to be defined.
+    Err,
 }
 
 /// Inline assembly dialect.
index 550f1160bed858990976922e25509e1ce577e535..c68e2ae34687b347cc095ca3b6d7787ec3c265db 100644 (file)
@@ -408,7 +408,7 @@ pub fn new() -> Features {
     // Allows the definition recursive static items.
     (accepted, static_recursion, "1.17.0", Some(29719)),
     // pub(restricted) visibilities (RFC 1422)
-    (accepted, pub_restricted, "1.17.0", Some(32409)),
+    (accepted, pub_restricted, "1.18.0", Some(32409)),
     // The #![windows_subsystem] attribute
     (accepted, windows_subsystem, "1.18.0", Some(37499)),
 );
index 1a4e196ac557778e3b39ec6760495ebbadbd79c6..92e25b00e0ac108cfd41d90b06e1773f45b21d38 100644 (file)
@@ -358,7 +358,7 @@ pub fn noop_fold_ty<T: Folder>(t: P<Ty>, fld: &mut T) -> P<Ty> {
     t.map(|Ty {id, node, span}| Ty {
         id: fld.new_id(id),
         node: match node {
-            TyKind::Infer | TyKind::ImplicitSelf => node,
+            TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => node,
             TyKind::Slice(ty) => TyKind::Slice(fld.fold_ty(ty)),
             TyKind::Ptr(mt) => TyKind::Ptr(fld.fold_mt(mt)),
             TyKind::Rptr(region, mt) => {
index 0dd2c03acb654b8eee67693b3d561381b65afcd0..43d21015a4fb14569ac0f1635ae18b218b1bc550 100644 (file)
@@ -407,6 +407,25 @@ fn from(expr: P<Expr>) -> Self {
     }
 }
 
+/// Create a placeholder argument.
+fn dummy_arg(span: Span) -> Arg {
+    let spanned = Spanned {
+        span: span,
+        node: keywords::Invalid.ident()
+    };
+    let pat = P(Pat {
+        id: ast::DUMMY_NODE_ID,
+        node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), spanned, None),
+        span: span
+    });
+    let ty = Ty {
+        node: TyKind::Err,
+        span: span,
+        id: ast::DUMMY_NODE_ID
+    };
+    Arg { ty: P(ty), pat: pat, id: ast::DUMMY_NODE_ID }
+}
+
 impl<'a> Parser<'a> {
     pub fn new(sess: &'a ParseSess,
                tokens: TokenStream,
@@ -4376,8 +4395,12 @@ fn parse_fn_args(&mut self, named_args: bool, allow_variadic: bool)
                             Ok(arg) => Ok(Some(arg)),
                             Err(mut e) => {
                                 e.emit();
+                                let lo = p.prev_span;
+                                // Skip every token until next possible arg or end.
                                 p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]);
-                                Ok(None)
+                                // Create a placeholder argument for proper arg count (#34264).
+                                let span = lo.to(p.prev_span);
+                                Ok(Some(dummy_arg(span)))
                             }
                         }
                     }
index f042a18d610360b1184c306d95117706de49056d..e7feff2b79fceb9936a665f90923ee0f9fa490b1 100644 (file)
@@ -1095,6 +1095,9 @@ pub fn print_type(&mut self, ty: &ast::Ty) -> io::Result<()> {
             ast::TyKind::Infer => {
                 word(&mut self.s, "_")?;
             }
+            ast::TyKind::Err => {
+                word(&mut self.s, "?")?;
+            }
             ast::TyKind::ImplicitSelf => {
                 word(&mut self.s, "Self")?;
             }
index a5333f3bb6a6e46c1f3af1c8e1d30194c194d897..b5e9a1892acc9927446c1cfffcd394cbbcd93094 100644 (file)
@@ -350,7 +350,7 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
         TyKind::Typeof(ref expression) => {
             visitor.visit_expr(expression)
         }
-        TyKind::Infer | TyKind::ImplicitSelf => {}
+        TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
         TyKind::Mac(ref mac) => {
             visitor.visit_mac(mac)
         }
diff --git a/src/test/compile-fail/const-match-pattern-arm.rs b/src/test/compile-fail/const-match-pattern-arm.rs
new file mode 100644 (file)
index 0000000..452aa87
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const x: bool = match Some(true) {
+    Some(value) => true,
+    //~^ ERROR: constant contains unimplemented expression type [E0019]
+    _ => false
+};
+
+const y: bool = {
+    match Some(true) {
+        Some(value) => true,
+        //~^ ERROR: constant contains unimplemented expression type [E0019]
+        _ => false
+    }
+};
+
+fn main() {}
diff --git a/src/test/compile-fail/issue-35675.rs b/src/test/compile-fail/issue-35675.rs
new file mode 100644 (file)
index 0000000..f990c2c
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Fruit {
+    Apple(i64),
+    //~^ HELP there is an enum variant `Fruit::Apple`, did you mean to use `Fruit`?
+    //~| HELP there is an enum variant `Fruit::Apple`, did you mean to use `Fruit`?
+    Orange(i64),
+}
+
+fn should_return_fruit() -> Apple {
+    //~^ ERROR cannot find type `Apple` in this scope
+    //~| NOTE not found in this scope
+    Apple(5)
+    //~^ ERROR cannot find function `Apple` in this scope
+    //~| NOTE not found in this scope
+    //~| HELP possible candidate is found in another module, you can import it into scope
+}
+
+fn should_return_fruit_too() -> Fruit::Apple {
+    //~^ ERROR expected type, found variant `Fruit::Apple`
+    //~| NOTE not a type
+    Apple(5)
+    //~^ ERROR cannot find function `Apple` in this scope
+    //~| NOTE not found in this scope
+    //~| HELP possible candidate is found in another module, you can import it into scope
+}
+
+fn foo() -> Ok {
+    //~^ ERROR expected type, found variant `Ok`
+    //~| NOTE not a type
+    //~| HELP there is an enum variant
+    //~| HELP there is an enum variant
+    Ok(())
+}
+
+fn bar() -> Variant3 {
+    //~^ ERROR cannot find type `Variant3` in this scope
+    //~| NOTE not found in this scope
+}
+
+fn qux() -> Some {
+    //~^ ERROR expected type, found variant `Some`
+    //~| NOTE not a type
+    //~| HELP there is an enum variant
+    //~| HELP there is an enum variant
+    Some(1)
+}
+
+fn main() {}
+
+mod x {
+    enum Enum {
+        Variant1,
+        Variant2(),
+        Variant3(usize),
+        //~^ HELP there is an enum variant `x::Enum::Variant3`, did you mean to use `x::Enum`?
+        Variant4 {},
+    }
+}
diff --git a/src/test/mir-opt/issue-41110.rs b/src/test/mir-opt/issue-41110.rs
new file mode 100644 (file)
index 0000000..fec635b
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// check that we don't emit multiple drop flags when they are not needed.
+
+fn main() {
+    let x = S.other(S.id());
+}
+
+pub fn test() {
+    let u = S;
+    let mut v = S;
+    drop(v);
+    v = u;
+}
+
+struct S;
+impl Drop for S {
+    fn drop(&mut self) {
+    }
+}
+
+impl S {
+    fn id(self) -> Self { self }
+    fn other(self, s: Self) {}
+}
+
+// END RUST SOURCE
+// START rustc.node4.ElaborateDrops.after.mir
+//    let mut _2: S;
+//    let mut _3: ();
+//    let mut _4: S;
+//    let mut _5: S;
+//    let mut _6: bool;
+//
+//    bb0: {
+// END rustc.node4.ElaborateDrops.after.mir
+// START rustc.node13.ElaborateDrops.after.mir
+//    let mut _2: ();
+//    let mut _4: ();
+//    let mut _5: S;
+//    let mut _6: S;
+//    let mut _7: bool;
+//
+//    bb0: {
+// END rustc.node13.ElaborateDrops.after.mir
diff --git a/src/test/ui/span/issue-34264.rs b/src/test/ui/span/issue-34264.rs
new file mode 100644 (file)
index 0000000..00482f5
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo(Option<i32>, String) {}
+fn bar(x, y: usize) {}
+
+fn main() {
+    foo(Some(42), 2);
+    foo(Some(42), 2, "");
+    bar("", "");
+    bar(1, 2);
+    bar(1, 2, 3);
+}
diff --git a/src/test/ui/span/issue-34264.stderr b/src/test/ui/span/issue-34264.stderr
new file mode 100644 (file)
index 0000000..98183e2
--- /dev/null
@@ -0,0 +1,49 @@
+error: expected one of `:` or `@`, found `<`
+  --> $DIR/issue-34264.rs:11:14
+   |
+11 | fn foo(Option<i32>, String) {}
+   |              ^ expected one of `:` or `@` here
+
+error: expected one of `:` or `@`, found `)`
+  --> $DIR/issue-34264.rs:11:27
+   |
+11 | fn foo(Option<i32>, String) {}
+   |                           ^ expected one of `:` or `@` here
+
+error: expected one of `:` or `@`, found `,`
+  --> $DIR/issue-34264.rs:12:9
+   |
+12 | fn bar(x, y: usize) {}
+   |         ^ expected one of `:` or `@` here
+
+error[E0061]: this function takes 2 parameters but 3 parameters were supplied
+  --> $DIR/issue-34264.rs:16:9
+   |
+11 | fn foo(Option<i32>, String) {}
+   | ------------------------------ defined here
+...
+16 |     foo(Some(42), 2, "");
+   |         ^^^^^^^^^^^^^^^ expected 2 parameters
+
+error[E0308]: mismatched types
+  --> $DIR/issue-34264.rs:17:13
+   |
+17 |     bar("", "");
+   |             ^^ expected usize, found reference
+   |
+   = note: expected type `usize`
+              found type `&'static str`
+   = help: here are some functions which might fulfill your needs:
+           - .len()
+
+error[E0061]: this function takes 2 parameters but 3 parameters were supplied
+  --> $DIR/issue-34264.rs:19:9
+   |
+12 | fn bar(x, y: usize) {}
+   | ---------------------- defined here
+...
+19 |     bar(1, 2, 3);
+   |         ^^^^^^^ expected 2 parameters
+
+error: aborting due to 3 previous errors
+