[auto-toolstate][2+3/8] Move external tools tests into its own job with --no-fail-fast
This PR performs these things:
1. The `aux` job now performs "cargotest" and "pretty" tests. The clippy/rustfmt/rls/miri tests are moved into their own job.
2. These tests are run with `--no-fail-fast`, so that we can get the maximum number of failures of all tools from a single CI run.
3. The test results are stored into a JSON file, ready to be uploaded in the future.
This is step 2 and 3/8 of automatic management of broken tools #45861.
pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
self.kind.successors_mut()
}
+
+ /// Returns a mutable reference to the slot holding this terminator's
+ /// unwind successor, or `None` if its kind has no unwind edge.
+ /// Delegates to `TerminatorKind::unwind_mut`.
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ self.kind.unwind_mut()
+ }
}
impl<'tcx> TerminatorKind<'tcx> {
}
}
}
+
+ /// Returns a mutable reference to the unwind-successor slot of this
+ /// terminator (`cleanup` for `Call`/`Assert`, `unwind` for
+ /// `Drop`/`DropAndReplace`), or `None` for kinds with no unwind edge.
+ /// The inner `Option` is itself mutable so callers can clear or
+ /// retarget the edge (e.g. when folding no-op landing pads).
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ match *self {
+ // These terminator kinds carry no unwind/cleanup edge.
+ TerminatorKind::Goto { .. } |
+ TerminatorKind::Resume |
+ TerminatorKind::Return |
+ TerminatorKind::Unreachable |
+ TerminatorKind::GeneratorDrop |
+ TerminatorKind::Yield { .. } |
+ TerminatorKind::SwitchInt { .. } |
+ TerminatorKind::FalseEdges { .. } => {
+ None
+ },
+ TerminatorKind::Call { cleanup: ref mut unwind, .. } |
+ TerminatorKind::Assert { cleanup: ref mut unwind, .. } |
+ TerminatorKind::DropAndReplace { ref mut unwind, .. } |
+ TerminatorKind::Drop { ref mut unwind, .. } => {
+ Some(unwind)
+ }
+ }
+ }
}
impl<'tcx> BasicBlockData<'tcx> {
// StorageDead, but we don't always emit those (notably on unwind paths),
// so this "extra check" serves as a kind of backup.
let domain = flow_state.borrows.base_results.operator();
- for borrow in domain.borrows() {
- let root_place = self.prefixes(
- &borrow.place,
- PrefixSet::All
- ).last().unwrap();
- match root_place {
- Place::Static(_) => {
- self.access_place(
- ContextKind::StorageDead.new(loc),
- (&root_place, self.mir.source_info(borrow.location).span),
- (Deep, Write(WriteKind::StorageDeadOrDrop)),
- LocalMutationIsAllowed::Yes,
- flow_state
- );
- }
- Place::Local(_) => {
- self.access_place(
- ContextKind::StorageDead.new(loc),
- (&root_place, self.mir.source_info(borrow.location).span),
- (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
- LocalMutationIsAllowed::Yes,
- flow_state
- );
- }
- Place::Projection(_) => ()
+ let data = domain.borrows();
+ flow_state.borrows.with_elems_outgoing(|borrows| for i in borrows {
+ let borrow = &data[i];
+
+ if self.place_is_invalidated_at_exit(&borrow.place) {
+ debug!("borrow conflicts at exit {:?}", borrow);
+ let borrow_span = self.mir.source_info(borrow.location).span;
+ // FIXME: should be talking about the region lifetime instead
+ // of just a span here.
+ let end_span = domain.opt_region_end_span(&borrow.region);
+
+ self.report_borrowed_value_does_not_live_long_enough(
+ ContextKind::StorageDead.new(loc),
+ (&borrow.place, borrow_span),
+ end_span)
}
- }
+ });
}
TerminatorKind::Goto { target: _ } |
TerminatorKind::Unreachable |
context, common_prefix, place_span, bk,
&borrow, end_issued_loan_span)
}
- WriteKind::StorageDeadOrDrop => {
+ WriteKind::StorageDeadOrDrop => {
let end_span =
flow_state.borrows.base_results.operator().opt_region_end_span(
&borrow.region);
Operand::Constant(_) => {}
}
}
+
+ /// Returns whether a borrow of this place is invalidated when the function
+ /// exits
+ fn place_is_invalidated_at_exit(&self, place: &Place<'tcx>) -> bool {
+ debug!("place_is_invalidated_at_exit({:?})", place);
+ let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();
+
+ // FIXME(nll-rfc#40): do more precise destructor tracking here. For now
+ // we just know that all locals are dropped at function exit (otherwise
+ // we'll have a memory leak) and assume that all statics have a destructor.
+ let (might_be_alive, will_be_dropped) = match root_place {
+ Place::Static(statik) => {
+ // Thread-locals might be dropped after the function exits, but
+ // "true" statics will never be.
+ let is_thread_local = self.tcx.get_attrs(statik.def_id).iter().any(|attr| {
+ attr.check_name("thread_local")
+ });
+
+ (true, is_thread_local)
+ }
+ Place::Local(_) => {
+ // Locals are always dropped at function exit, and if they
+ // have a destructor it would've been called already.
+ (false, true)
+ }
+ Place::Projection(..) => bug!("root of {:?} is a projection ({:?})?",
+ place, root_place)
+ };
+
+ if !will_be_dropped {
+ debug!("place_is_invalidated_at_exit({:?}) - won't be dropped", place);
+ return false;
+ }
+
+ // FIXME: replace this with a proper borrow_conflicts_with_place when
+ // that is merged.
+ // A wider prefix set is needed when the root might still be alive
+ // (thread-local case): a shallow overlap is not enough to prove
+ // the borrow dies with the function.
+ let prefix_set = if might_be_alive {
+ PrefixSet::Supporting
+ } else {
+ PrefixSet::Shallow
+ };
+
+ // The borrow is invalidated iff the dropped root appears among the
+ // prefixes of the borrowed place under the chosen prefix set.
+ self.prefixes(place, prefix_set).any(|prefix| prefix == root_place)
+ }
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
fn report_borrowed_value_does_not_live_long_enough(&mut self,
_: Context,
- (place, span): (&Place, Span),
+ (place, span): (&Place<'tcx>, Span),
end_span: Option<Span>) {
- let proper_span = match *place {
+ let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();
+ let proper_span = match *root_place {
Place::Local(local) => self.mir.local_decls[local].source_info.span,
_ => span
};
-
let mut err = self.tcx.path_does_not_live_long_enough(span, "borrowed value", Origin::Mir);
err.span_label(proper_span, "temporary value created here");
err.span_label(span, "temporary value dropped here while still borrowed");
let univ = self.base_results.sets().bits_per_block();
self.curr_state.elems(univ)
}
+
+ /// Invokes `f` with the dataflow elements *after* the current
+ /// statement's transfer function is applied
+ /// (`curr_state | stmt_gen - stmt_kill`). Works on a clone, so the
+ /// stored `curr_state` is left untouched.
+ fn with_elems_outgoing<F>(&self, f: F) where F: FnOnce(indexed_set::Elems<BD::Idx>) {
+ let mut curr_state = self.curr_state.clone();
+ curr_state.union(&self.stmt_gen);
+ curr_state.subtract(&self.stmt_kill);
+ let univ = self.base_results.sets().bits_per_block();
+ f(curr_state.elems(univ));
+ }
}
this.cfg.terminate(block, source_info, TerminatorKind::Call {
func: fun,
args,
- cleanup,
+ cleanup: Some(cleanup),
destination: if diverges {
None
} else {
ExprKind::Break { .. } |
ExprKind::InlineAsm { .. } |
ExprKind::Return {.. } => {
- this.stmt_expr(block, expr)
+ unpack!(block = this.stmt_expr(block, expr));
+ this.cfg.push_assign_unit(block, source_info, destination);
+ block.unit()
}
// these are the cases that are more naturally handled by some other mode
}),
args: vec![val, expect],
destination: Some((eq_result.clone(), eq_block)),
- cleanup,
+ cleanup: Some(cleanup),
});
// check the result
assert_eq!(scope.region_scope, region_scope.0);
self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
+ let resume_block = self.resume_block();
unpack!(block = build_scope_drops(&mut self.cfg,
+ resume_block,
&scope,
&self.scopes,
block,
}
{
+ let resume_block = self.resume_block();
let mut rest = &mut self.scopes[(len - scope_count)..];
while let Some((scope, rest_)) = {rest}.split_last_mut() {
rest = rest_;
self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
unpack!(block = build_scope_drops(&mut self.cfg,
+ resume_block,
scope,
rest,
block,
let src_info = self.scopes[0].source_info(self.fn_span);
let mut block = self.cfg.start_new_block();
let result = block;
+ let resume_block = self.resume_block();
let mut rest = &mut self.scopes[..];
while let Some((scope, rest_)) = {rest}.split_last_mut() {
self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
unpack!(block = build_scope_drops(&mut self.cfg,
+ resume_block,
scope,
rest,
block,
/// This path terminates in Resume. Returns the start of the path.
/// See module comment for more details. The path always exists now:
/// when there is no cleanup to do it is simply the shared resume block.
- pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
+ pub fn diverge_cleanup(&mut self) -> BasicBlock {
self.diverge_cleanup_gen(false)
}
- fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
- if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
- return None;
+ /// Returns the single `Resume`-terminated cleanup block for this body,
+ /// creating it on first use and caching it in `cached_resume_block`
+ /// so every unwind path funnels into the same block.
+ fn resume_block(&mut self) -> BasicBlock {
+ if let Some(target) = self.cached_resume_block {
+ target
+ } else {
+ let resumeblk = self.cfg.start_new_cleanup_block();
+ self.cfg.terminate(resumeblk,
+ SourceInfo {
+ scope: ARGUMENT_VISIBILITY_SCOPE,
+ span: self.fn_span
+ },
+ TerminatorKind::Resume);
+ self.cached_resume_block = Some(resumeblk);
+ resumeblk
}
- assert!(!self.scopes.is_empty()); // or `any` above would be false
+ }
+
+ /// Builds the diverging (unwind) cleanup path through all active
+ /// scopes, starting from the shared resume block, and returns the
+ /// entry block of that path. With nothing to clean up, this is just
+ /// the resume block itself.
+ fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
+ // To start, create the resume terminator.
+ let mut target = self.resume_block();
- let Builder { ref mut cfg, ref mut scopes,
- ref mut cached_resume_block, .. } = *self;
+ let Builder { ref mut cfg, ref mut scopes, .. } = *self;
// Build up the drops in **reverse** order. The end result will
// look like:
// store caches. If everything is cached, we'll just walk right
// to left reading the cached results but never created anything.
- // To start, create the resume terminator.
- let mut target = if let Some(target) = *cached_resume_block {
- target
- } else {
- let resumeblk = cfg.start_new_cleanup_block();
- cfg.terminate(resumeblk,
- scopes[0].source_info(self.fn_span),
- TerminatorKind::Resume);
- *cached_resume_block = Some(resumeblk);
- resumeblk
- };
-
- for scope in scopes.iter_mut() {
- target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
- scope, target, generator_drop);
+ // Only thread drop scopes onto the path when some scope actually
+ // needs cleanup; otherwise the bare resume block suffices.
+ if scopes.iter().any(|scope| scope.needs_cleanup) {
+ for scope in scopes.iter_mut() {
+ target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
+ scope, target, generator_drop);
+ }
}
- Some(target)
+
+ target
/// Utility function for *non*-scope code to build their own drops
TerminatorKind::Drop {
location,
target: next_target,
- unwind: diverge_target,
+ unwind: Some(diverge_target),
});
next_target.unit()
}
location,
value,
target: next_target,
- unwind: diverge_target,
+ unwind: Some(diverge_target),
});
next_target.unit()
}
expected,
msg,
target: success_block,
- cleanup,
+ cleanup: Some(cleanup),
});
success_block
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
+ resume_block: BasicBlock,
scope: &Scope<'tcx>,
earlier_scopes: &[Scope<'tcx>],
mut block: BasicBlock,
cfg.terminate(block, source_info, TerminatorKind::Drop {
location: drop_data.location.clone(),
target: next,
- unwind: on_diverge
+ unwind: Some(on_diverge.unwrap_or(resume_block))
});
block = next;
}
place: &mir::Place<'tcx>) -> bool {
let ty = place.ty(mir, tcx).to_ty(tcx);
match ty.sty {
- ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
+ ty::TyArray(..) => {
+ debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
+ place, ty);
+ false
+ }
+ ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
place, ty);
true
&self.borrows[idx].location
}
+ pub fn nonlexical_regioncx(&self) -> Option<&'a RegionInferenceContext<'tcx>> {
+ self.nonlexical_regioncx
+ }
+
/// Returns the span for the "end point" given region. This will
/// return `None` if NLL is enabled, since that concept has no
/// meaning there. Otherwise, return region span if it exists and
mir::StatementKind::Assign(_, ref rhs) => {
if let mir::Rvalue::Ref(region, _, ref place) = *rhs {
if is_unsafe_place(self.tcx, self.mir, place) { return; }
+ if let RegionKind::ReEmpty = region {
+ // If the borrowed value is dead, the region for it
+ // can be empty. Don't track the borrow in that case.
+ return
+ }
+
let index = self.location_map.get(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
fn downcast_subpath(&self, _path: Self::Path, _variant: usize) -> Option<Self::Path> {
Some(())
}
+ // This elaborator tracks no per-element move paths (its `Path` is `()`,
+ // cf. `downcast_subpath` above), so array elements never get a subpath.
+ fn array_subpath(&self, _path: Self::Path, _index: u32, _size: u32) -> Option<Self::Path> {
+ None
+ }
}
/// Build a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
})
}
+ /// Finds the move path for element `index` of an array of length
+ /// `size`. Matches `ConstantIndex` projections addressed from either
+ /// end: a front offset matches directly, a `from_end` offset matches
+ /// when `size - offset == index`.
+ fn array_subpath(&self, path: Self::Path, index: u32, size: u32) -> Option<Self::Path> {
+ dataflow::move_path_children_matching(self.ctxt.move_data(), path, |p| {
+ match p {
+ &Projection {
+ elem: ProjectionElem::ConstantIndex{offset, min_length: _, from_end: false}, ..
+ } => offset == index,
+ &Projection {
+ elem: ProjectionElem::ConstantIndex{offset, min_length: _, from_end: true}, ..
+ } => size - offset == index,
+ _ => false
+ }
+ })
+ }
+
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
dataflow::move_path_children_matching(self.ctxt.move_data(), path, |p| {
match p {
pub mod add_call_guards;
pub mod promote_consts;
pub mod qualify_consts;
+pub mod remove_noop_landing_pads;
pub mod dump_mir;
pub mod deaggregator;
pub mod instcombine;
let mut mir = tcx.mir_validated(def_id).steal();
run_passes![tcx, mir, def_id, 2;
+ // Remove all things not needed by analysis
no_landing_pads::NoLandingPads,
simplify_branches::SimplifyBranches::new("initial"),
+ remove_noop_landing_pads::RemoveNoopLandingPads,
+ simplify::SimplifyCfg::new("early-opt"),
// These next passes must be executed together
add_call_guards::CriticalCallEdges,
instcombine::InstCombine,
deaggregator::Deaggregator,
copy_prop::CopyPropagation,
+ remove_noop_landing_pads::RemoveNoopLandingPads,
+ simplify::SimplifyCfg::new("final"),
simplify::SimplifyLocals,
generator::StateTransform,
bb: BasicBlock,
terminator: &mut Terminator<'tcx>,
location: Location) {
- match terminator.kind {
- TerminatorKind::Goto { .. } |
- TerminatorKind::Resume |
- TerminatorKind::Return |
- TerminatorKind::Unreachable |
- TerminatorKind::GeneratorDrop |
- TerminatorKind::Yield { .. } |
- TerminatorKind::SwitchInt { .. } |
- TerminatorKind::FalseEdges { .. } => {
- /* nothing to do */
- },
- TerminatorKind::Call { cleanup: ref mut unwind, .. } |
- TerminatorKind::Assert { cleanup: ref mut unwind, .. } |
- TerminatorKind::DropAndReplace { ref mut unwind, .. } |
- TerminatorKind::Drop { ref mut unwind, .. } => {
- unwind.take();
- },
+ if let Some(unwind) = terminator.kind.unwind_mut() {
+ unwind.take();
}
self.super_terminator(bb, terminator, location);
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::TyCtxt;
+use rustc::mir::*;
+use rustc_data_structures::bitvec::BitVector;
+use rustc_data_structures::indexed_vec::Idx;
+use transform::{MirPass, MirSource};
+use util::patch::MirPatch;
+
+/// A pass that removes no-op landing pads and replaces jumps to them with
+/// `None`. This is important because otherwise LLVM generates terrible
+/// code for these.
+pub struct RemoveNoopLandingPads;
+
+impl MirPass for RemoveNoopLandingPads {
+    fn run_pass<'a, 'tcx>(&self,
+                          tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                          _src: MirSource,
+                          mir: &mut Mir<'tcx>) {
+        // Nothing to do when the session builds without landing pads
+        // (e.g. -Z no-landing-pads); there are no unwind edges to fold.
+        if tcx.sess.no_landing_pads() {
+            return
+        }
+
+        debug!("remove_noop_landing_pads({:?})", mir);
+        self.remove_nop_landing_pads(mir);
+    }
+}
+
+impl RemoveNoopLandingPads {
+    /// Returns true if `bb` performs no meaningful work on the unwind
+    /// path: all its statements are no-ops (or plain local assignments
+    /// such as drop-flag writes) and its terminator only flows into
+    /// blocks already known to be no-op landing pads (or is `Resume`).
+    fn is_nop_landing_pad(&self, bb: BasicBlock, mir: &Mir, nop_landing_pads: &BitVector)
+                          -> bool
+    {
+        for stmt in &mir[bb].statements {
+            match stmt.kind {
+                StatementKind::StorageLive(_) |
+                StatementKind::StorageDead(_) |
+                StatementKind::EndRegion(_) |
+                StatementKind::Nop => {
+                    // These are all nops in a landing pad (there's some
+                    // borrowck interaction between EndRegion and storage
+                    // instructions, but this should all run after borrowck).
+                }
+
+                StatementKind::Assign(Place::Local(_), Rvalue::Use(_)) => {
+                    // Writing to a local (e.g. a drop flag) does not
+                    // turn a landing pad to a non-nop
+                }
+
+                StatementKind::Assign(_, _) |
+                StatementKind::SetDiscriminant { .. } |
+                StatementKind::InlineAsm { .. } |
+                StatementKind::Validate { .. } => {
+                    return false;
+                }
+            }
+        }
+
+        let terminator = mir[bb].terminator();
+        match terminator.kind {
+            // Pure control flow: a nop pad iff every successor is one.
+            TerminatorKind::Goto { .. } |
+            TerminatorKind::Resume |
+            TerminatorKind::SwitchInt { .. } |
+            TerminatorKind::FalseEdges { .. } => {
+                terminator.successors().iter().all(|succ| {
+                    nop_landing_pads.contains(succ.index())
+                })
+            },
+            // Anything that can perform work (calls, drops, asserts) or
+            // exits the function disqualifies the block.
+            TerminatorKind::GeneratorDrop |
+            TerminatorKind::Yield { .. } |
+            TerminatorKind::Return |
+            TerminatorKind::Unreachable |
+            TerminatorKind::Call { .. } |
+            TerminatorKind::Assert { .. } |
+            TerminatorKind::DropAndReplace { .. } |
+            TerminatorKind::Drop { .. } => {
+                false
+            }
+        }
+    }
+
+    /// Folds every jump to a no-op landing pad into the single shared
+    /// resume block, then clears unwind edges that point straight at
+    /// that resume block so codegen emits no landing pad at all.
+    fn remove_nop_landing_pads(&self, mir: &mut Mir) {
+        // make sure there's a single resume block
+        let resume_block = {
+            let patch = MirPatch::new(mir);
+            let resume_block = patch.resume_block();
+            patch.apply(mir);
+            resume_block
+        };
+        debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
+
+        let mut jumps_folded = 0;
+        let mut landing_pads_removed = 0;
+        let mut nop_landing_pads = BitVector::new(mir.basic_blocks().len());
+
+        // This is a post-order traversal, so that if A post-dominates B
+        // then A will be visited before B.
+        let postorder: Vec<_> = traversal::postorder(mir).map(|(bb, _)| bb).collect();
+        for bb in postorder {
+            debug!("  processing {:?}", bb);
+            // Redirect ordinary successors that are no-op pads to resume.
+            for target in mir[bb].terminator_mut().successors_mut() {
+                if *target != resume_block && nop_landing_pads.contains(target.index()) {
+                    debug!("    folding noop jump to {:?} to resume block", target);
+                    *target = resume_block;
+                    jumps_folded += 1;
+                }
+            }
+
+            // An unwind edge straight to resume is a no-op landing pad:
+            // drop it entirely (undo the jump count bumped above).
+            match mir[bb].terminator_mut().unwind_mut() {
+                Some(unwind) => {
+                    if *unwind == Some(resume_block) {
+                        debug!("    removing noop landing pad");
+                        jumps_folded -= 1;
+                        landing_pads_removed += 1;
+                        *unwind = None;
+                    }
+                }
+                _ => {}
+            }
+
+            let is_nop_landing_pad = self.is_nop_landing_pad(bb, mir, &nop_landing_pads);
+            if is_nop_landing_pad {
+                nop_landing_pads.insert(bb.index());
+            }
+            debug!("    is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
+        }
+
+        debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
+    }
+}
self.collapse_goto_chain(successor, &mut changed);
}
- changed |= self.simplify_unwind(&mut terminator);
-
let mut new_stmts = vec![];
let mut inner_changed = true;
while inner_changed {
true
}
- // turn an unwind branch to a resume block into a None
- fn simplify_unwind(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
- let unwind = match terminator.kind {
- TerminatorKind::Drop { ref mut unwind, .. } |
- TerminatorKind::DropAndReplace { ref mut unwind, .. } |
- TerminatorKind::Call { cleanup: ref mut unwind, .. } |
- TerminatorKind::Assert { cleanup: ref mut unwind, .. } =>
- unwind,
- _ => return false
- };
-
- if let &mut Some(unwind_block) = unwind {
- let is_resume_block = match self.basic_blocks[unwind_block] {
- BasicBlockData {
- ref statements,
- terminator: Some(Terminator {
- kind: TerminatorKind::Resume, ..
- }), ..
- } if statements.is_empty() => true,
- _ => false
- };
- if is_resume_block {
- debug!("simplifying unwind to {:?} from {:?}",
- unwind_block, terminator.source_info);
- *unwind = None;
- }
- return is_resume_block;
- }
-
- false
- }
-
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind {
use rustc_data_structures::indexed_vec::Idx;
use util::patch::MirPatch;
-use std::iter;
+use std::{iter, u32};
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DropFlagState {
fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option<Self::Path>;
+ fn array_subpath(&self, path: Self::Path, index: u32, size: u32) -> Option<Self::Path>;
}
#[derive(Debug)]
loop_block
}
- fn open_drop_for_array(&mut self, ety: Ty<'tcx>) -> BasicBlock {
- debug!("open_drop_for_array({:?})", ety);
+ fn open_drop_for_array(&mut self, ety: Ty<'tcx>, opt_size: Option<u64>) -> BasicBlock {
+ debug!("open_drop_for_array({:?}, {:?})", ety, opt_size);
// if size_of::<ety>() == 0 {
// index_based_loop
// ptr_based_loop
// }
- let tcx = self.tcx();
+ if let Some(size) = opt_size {
+ assert!(size <= (u32::MAX as u64),
+ "move out check doesn't implemented for array bigger then u32");
+ let size = size as u32;
+ let fields: Vec<(Place<'tcx>, Option<D::Path>)> = (0..size).map(|i| {
+ (self.place.clone().elem(ProjectionElem::ConstantIndex{
+ offset: i,
+ min_length: size,
+ from_end: false
+ }),
+ self.elaborator.array_subpath(self.path, i, size))
+ }).collect();
+
+ if fields.iter().any(|(_,path)| path.is_some()) {
+ let (succ, unwind) = self.drop_ladder_bottom();
+ return self.drop_ladder(fields, succ, unwind).0
+ }
+ }
let move_ = |place: &Place<'tcx>| Operand::Move(place.clone());
+ let tcx = self.tcx();
let size = &Place::Local(self.new_temp(tcx.types.usize));
let size_is_zero = &Place::Local(self.new_temp(tcx.types.bool));
let base_block = BasicBlockData {
let succ = self.succ;
self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
}
- ty::TyArray(ety, _) | ty::TySlice(ety) => {
- self.open_drop_for_array(ety)
- }
+ ty::TyArray(ety, size) => self.open_drop_for_array(
+ ety, size.val.to_const_int().and_then(|v| v.to_u64())),
+ ty::TySlice(ety) => self.open_drop_for_array(ety, None),
+
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
}
return false;
}
+ // Builds a unique id for a search-index entry: item-type tag + path
+ // (+ parent name, when the item belongs to a parent such as a struct
+ // or trait) + item name. Keeps same-named items under different
+ // parents from colliding during result deduplication.
+ function generateId(ty) {
+ if (ty.parent && ty.parent.name) {
+ return itemTypes[ty.ty] + ty.path + ty.parent.name + ty.name;
+ }
+ return itemTypes[ty.ty] + ty.path + ty.name;
+ }
+
// quoted values mean literal search
var nSearchWords = searchWords.length;
if ((val.charAt(0) === "\"" || val.charAt(0) === "'") &&
var in_args = findArg(searchIndex[i], val, true);
var returned = checkReturned(searchIndex[i], val, true);
var ty = searchIndex[i];
- var fullId = itemTypes[ty.ty] + ty.path + ty.name;
+ var fullId = generateId(ty);
if (searchWords[i] === val.name) {
// filter type: ... queries
if (!type) {
continue;
}
- var fullId = itemTypes[ty.ty] + ty.path + ty.name;
+ var fullId = generateId(ty);
// allow searching for void (no output) functions as well
var typeOutput = type.output ? type.output.name : "";
var index = -1;
// we want lev results to go lower than others
var lev = MAX_LEV_DISTANCE + 1;
- var fullId = itemTypes[ty.ty] + ty.path + ty.name;
+ var fullId = generateId(ty);
if (searchWords[j].indexOf(split[i]) > -1 ||
searchWords[j].indexOf(val) > -1 ||
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// revisions: ast mir
+//[mir]compile-flags: -Z borrowck=mir
+
+// Stores a borrow of a stack local into a `'static` slot: both AST and
+// MIR borrowck must reject this with E0597 (borrowed value does not
+// live long enough), per the error annotations below.
+fn cplusplus_mode_exceptionally_unsafe(x: &mut Option<&'static mut isize>) {
+    let mut z = (0, 0);
+    *x = Some(&mut z.1); //[ast]~ ERROR [E0597]
+                         //[mir]~^ ERROR [E0597]
+    panic!("catch me for a dangling pointer!")
+}
+
+fn main() {
+    cplusplus_mode_exceptionally_unsafe(&mut None);
+}
// StorageLive(_5);
// StorageLive(_6);
// _6 = move _4;
-// replace(_5 <- move _6) -> [return: bb1, unwind: bb5];
+// replace(_5 <- move _6) -> [return: bb2, unwind: bb5];
// }
// bb1: {
-// drop(_6) -> [return: bb6, unwind: bb4];
+// resume;
// }
// bb2: {
-// resume;
+// drop(_6) -> [return: bb6, unwind: bb4];
// }
// bb3: {
-// drop(_4) -> bb2;
+// drop(_4) -> bb1;
// }
// bb4: {
// drop(_5) -> bb3;
// }
// bb7: {
// StorageDead(_5);
-// drop(_4) -> bb8;
+// drop(_4) -> [return: bb8, unwind: bb1];
// }
// bb8: {
// StorageDead(_4);
// StorageLive(_1);
// StorageLive(_2);
// _2 = Box(S);
-// (*_2) = const S::new() -> [return: bb1, unwind: bb3];
+// (*_2) = const S::new() -> [return: bb2, unwind: bb3];
// }
//
// bb1: {
-// _1 = move _2;
-// drop(_2) -> bb4;
+// resume;
// }
//
// bb2: {
-// resume;
+// _1 = move _2;
+// drop(_2) -> bb4;
// }
//
// bb3: {
-// drop(_2) -> bb2;
+// drop(_2) -> bb1;
// }
//
// bb4: {
// }
//
// bb6: {
-// drop(_1) -> bb2;
+// drop(_1) -> bb1;
// }
//
// bb7: {
// _3 = &'26_2rs _2;
// StorageLive(_5);
// _5 = (*_3);
-// _4 = const foo(move _5) -> [return: bb1, unwind: bb3];
+// _4 = const foo(move _5) -> [return: bb2, unwind: bb3];
// }
// bb1: {
+// resume;
+// }
+// bb2: {
// StorageDead(_5);
// StorageLive(_6);
// _6 = &'26_4rs _2;
// EndRegion('26_2rs);
// StorageDead(_3);
// StorageDead(_2);
-// drop(_1) -> bb4;
-// }
-// bb2: {
-// resume;
+// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('26_2rs);
-// drop(_1) -> bb2;
+// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
// _4 = &'14s _1;
// _3 = [closure@NodeId(18)] { d: move _4 };
// StorageDead(_4);
-// _2 = const foo(move _3) -> [return: bb1, unwind: bb3];
+// _2 = const foo(move _3) -> [return: bb2, unwind: bb3];
// }
// bb1: {
+// resume;
+// }
+// bb2: {
// EndRegion('14s);
// StorageDead(_3);
// _0 = ();
-// drop(_1) -> bb4;
-// }
-// bb2: {
-// resume;
+// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('14s);
-// drop(_1) -> bb2;
+// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
// _4 = &'19s _1;
// _3 = [closure@NodeId(22)] { d: move _4 };
// StorageDead(_4);
-// _2 = const foo(move _3) -> [return: bb1, unwind: bb3];
+// _2 = const foo(move _3) -> [return: bb2, unwind: bb3];
// }
// bb1: {
+// resume;
+// }
+// bb2: {
// EndRegion('19s);
// StorageDead(_3);
// _0 = ();
-// drop(_1) -> bb4;
-// }
-// bb2: {
-// resume;
+// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('19s);
-// drop(_1) -> bb2;
+// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
// bb6: {
// StorageDead(_3);
// _0 = ();
-// drop(_1) -> bb7;
+// drop(_1) -> [return: bb7, unwind: bb1];
// }
// bb7: {
// StorageDead(_1);
// StorageDead(_3);
// EndRegion('15_0rs);
// StorageDead(_2);
-// drop(_1) -> bb1;
+// drop(_1) -> [return: bb2, unwind: bb1];
// }
// bb1: {
+// resume;
+// }
+// bb2: {
// return;
// }
// }
// _5 = _2;
// _4 = [closure@NodeId(22)] { r: move _5 };
// StorageDead(_5);
-// _3 = const foo(move _4) -> [return: bb1, unwind: bb3];
+// _3 = const foo(move _4) -> [return: bb2, unwind: bb3];
// }
// bb1: {
+// resume;
+// }
+// bb2: {
// StorageDead(_4);
// _0 = ();
// EndRegion('21_1rs);
// StorageDead(_2);
-// drop(_1) -> bb4;
-// }
-// bb2: {
-// resume;
+// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('21_1rs);
-// drop(_1) -> bb2;
+// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
// let mut _15: std::option::Option<&'35_0rs S<'35_0rs>>;
// let mut _16: &'35_0rs S<'35_0rs>;
// let mut _17: &'35_0rs S<'35_0rs>;
+//
// bb0: {
// goto -> bb1;
// }
// StorageLive(_3);
// StorageLive(_4);
// _4 = std::option::Option<&'35_0rs S<'35_0rs>>::None;
-// _3 = const <std::cell::Cell<T>>::new(move _4) -> bb2;
+// _3 = const <std::cell::Cell<T>>::new(move _4) -> [return: bb3, unwind: bb2];
// }
// bb2: {
+// resume;
+// }
+// bb3: {
// StorageDead(_4);
// _2 = S<'35_0rs> { r: move _3 };
// StorageDead(_3);
// _8 = &'35_0rs (*_9);
// _7 = std::option::Option<&'35_0rs S<'35_0rs>>::Some(move _8,);
// StorageDead(_8);
-// _5 = const <std::cell::Cell<T>>::set(move _6, move _7) -> bb3;
+// _5 = const <std::cell::Cell<T>>::set(move _6, move _7) -> [return: bb4, unwind: bb2];
// }
-// bb3: {
+// bb4: {
// EndRegion('16s);
// StorageDead(_7);
// StorageDead(_6);
// StorageDead(_9);
// StorageLive(_11);
-// _11 = const query() -> bb4;
-// }
-// bb4: {
-// switchInt(move _11) -> [0u8: bb6, otherwise: bb5];
+// _11 = const query() -> [return: bb5, unwind: bb2];
// }
// bb5: {
+// switchInt(move _11) -> [0u8: bb7, otherwise: bb6];
+// }
+// bb6: {
// _0 = ();
// StorageDead(_11);
// EndRegion('35_0rs);
// StorageDead(_2);
// return;
// }
-// bb6: {
+// bb7: {
// _10 = ();
// StorageDead(_11);
// StorageLive(_14);
// _16 = &'35_0rs (*_17);
// _15 = std::option::Option<&'35_0rs S<'35_0rs>>::Some(move _16,);
// StorageDead(_16);
-// _13 = const <std::cell::Cell<T>>::set(move _14, move_15) -> bb7;
+// _13 = const <std::cell::Cell<T>>::set(move _14, move _15) -> [return: bb8, unwind: bb2];
// }
-// bb7: {
+// bb8: {
// EndRegion('33s);
// StorageDead(_15);
// StorageDead(_14);
// _2 = (_3.0: &'12ds S1);
// _1 = move _2;
// StorageDead(_2);
-// drop(_3) -> bb1;
+// drop(_3) -> [return: bb2, unwind: bb1];
// }
//
// bb1: {
+// resume;
+// }
+//
+// bb2: {
// StorageDead(_3);
// StorageDead(_8);
// StorageDead(_9);
// _2 = (_3.0: &'12ds S1);
// _1 = move _2;
// StorageDead(_2);
-// drop(_3) -> bb1;
+// drop(_3) -> [return: bb2, unwind: bb1];
// }
//
// bb1: {
+// resume;
+// }
+//
+// bb2: {
// StorageDead(_3);
// StorageDead(_8);
// StorageDead(_5);
// END RUST SOURCE
//
-// START rustc.full_tested_match.SimplifyBranches-initial.before.mir
+// START rustc.full_tested_match.QualifyAndPromoteConstants.after.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 42i32,);
// _5 = discriminant(_2);
-// switchInt(move _5) -> [0isize: bb5, 1isize: bb3, otherwise: bb7];
+// switchInt(move _5) -> [0isize: bb6, 1isize: bb4, otherwise: bb8];
// }
-// bb1: { // arm1
+// bb1: {
+// resume;
+// }
+// bb2: { // arm1
// StorageLive(_7);
// _7 = _3;
// _1 = (const 1i32, move _7);
// StorageDead(_7);
-// goto -> bb12;
+// goto -> bb13;
// }
-// bb2: { // binding3(empty) and arm3
+// bb3: { // binding3(empty) and arm3
// _1 = (const 3i32, const 3i32);
-// goto -> bb12;
-// }
-// bb3: {
-// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
+// goto -> bb13;
// }
// bb4: {
-// falseEdges -> [real: bb11, imaginary: bb5]; //pre_binding2
+// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
// }
// bb5: {
-// falseEdges -> [real: bb2, imaginary: bb6]; //pre_binding3
+// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding2
// }
// bb6: {
-// unreachable;
+// falseEdges -> [real: bb3, imaginary: bb7]; //pre_binding3
// }
// bb7: {
// unreachable;
// }
-// bb8: { // binding1 and guard
+// bb8: {
+// unreachable;
+// }
+// bb9: { // binding1 and guard
// StorageLive(_3);
// _3 = ((_2 as Some).0: i32);
// StorageLive(_6);
-// _6 = const guard() -> bb9;
+// _6 = const guard() -> [return: bb10, unwind: bb1];
// }
-// bb9: { // end of guard
-// switchInt(move _6) -> [0u8: bb10, otherwise: bb1];
+// bb10: { // end of guard
+// switchInt(move _6) -> [0u8: bb11, otherwise: bb2];
// }
-// bb10: { // to pre_binding2
-// falseEdges -> [real: bb4, imaginary: bb4];
+// bb11: { // to pre_binding2
+// falseEdges -> [real: bb5, imaginary: bb5];
// }
-// bb11: { // bindingNoLandingPads.before.mir2 and arm2
+// bb12: { // bindingNoLandingPads.before.mir2 and arm2
// StorageLive(_4);
// _4 = ((_2 as Some).0: i32);
// StorageLive(_8);
// _8 = _4;
// _1 = (const 2i32, move _8);
// StorageDead(_8);
-// goto -> bb12;
+// goto -> bb13;
// }
-// bb12: {
+// bb13: {
// ...
// return;
// }
-// END rustc.full_tested_match.SimplifyBranches-initial.before.mir
+// END rustc.full_tested_match.QualifyAndPromoteConstants.after.mir
//
-// START rustc.full_tested_match2.SimplifyBranches-initial.before.mir
+// START rustc.full_tested_match2.QualifyAndPromoteConstants.before.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 42i32,);
// _5 = discriminant(_2);
-// switchInt(move _5) -> [0isize: bb4, 1isize: bb3, otherwise: bb7];
+// switchInt(move _5) -> [0isize: bb5, 1isize: bb4, otherwise: bb8];
+// }
+// bb1: {
+// resume;
// }
-// bb1: { // arm1
+// bb2: { // arm1
// StorageLive(_7);
// _7 = _3;
// _1 = (const 1i32, move _7);
// StorageDead(_7);
-// goto -> bb12;
+// goto -> bb13;
// }
-// bb2: { // binding3(empty) and arm3
+// bb3: { // binding3(empty) and arm3
// _1 = (const 3i32, const 3i32);
-// goto -> bb12;
-// }
-// bb3: {
-// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
+// goto -> bb13;
// }
// bb4: {
-// falseEdges -> [real: bb2, imaginary: bb5]; //pre_binding2
+// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
// }
// bb5: {
-// falseEdges -> [real: bb11, imaginary: bb6]; //pre_binding3
+// falseEdges -> [real: bb3, imaginary: bb6]; //pre_binding2
// }
// bb6: {
-// unreachable;
+// falseEdges -> [real: bb12, imaginary: bb7]; //pre_binding3
// }
// bb7: {
// unreachable;
// }
-// bb8: { // binding1 and guard
+// bb8: {
+// unreachable;
+// }
+// bb9: { // binding1 and guard
// StorageLive(_3);
// _3 = ((_2 as Some).0: i32);
// StorageLive(_6);
-// _6 = const guard() -> bb9;
+// _6 = const guard() -> [return: bb10, unwind: bb1];
// }
-// bb9: { // end of guard
-// switchInt(move _6) -> [0u8: bb10, otherwise: bb1];
+// bb10: { // end of guard
+// switchInt(move _6) -> [0u8: bb11, otherwise: bb2];
// }
-// bb10: { // to pre_binding2
-// falseEdges -> [real: bb5, imaginary: bb4];
+// bb11: { // to pre_binding2
+// falseEdges -> [real: bb6, imaginary: bb5];
// }
-// bb11: { // binding2 and arm2
+// bb12: { // binding2 and arm2
// StorageLive(_4);
// _4 = ((_2 as Some).0: i32);
// StorageLive(_8);
// _8 = _4;
// _1 = (const 2i32, move _8);
// StorageDead(_8);
-// goto -> bb12;
+// goto -> bb13;
// }
-// bb12: {
+// bb13: {
// ...
// return;
// }
-// END rustc.full_tested_match2.SimplifyBranches-initial.before.mir
+// END rustc.full_tested_match2.QualifyAndPromoteConstants.before.mir
//
-// START rustc.main.SimplifyBranches-initial.before.mir
+// START rustc.main.QualifyAndPromoteConstants.before.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 1i32,);
// _7 = discriminant(_2);
-// switchInt(move _7) -> [1isize: bb3, otherwise: bb4];
+// switchInt(move _7) -> [1isize: bb4, otherwise: bb5];
+// }
+// bb1: {
+// resume;
// }
-// bb1: { // arm1
-// _1 = const 1i32;
-// goto -> bb16;
+// bb2: { // arm1
+// _1 = const 1i32;
+// goto -> bb17;
// }
-// bb2: { // arm3
+// bb3: { // arm3
// _1 = const 3i32;
-// goto -> bb16;
+// goto -> bb17;
// }
//
-// bb3: {
-// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
-// }
-// bb4: {
-// falseEdges -> [real: bb11, imaginary: bb5]; //pre_binding2
-// }
-// bb5: {
-// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding3
-// }
-// bb6: {
-// falseEdges -> [real: bb15, imaginary: bb7]; //pre_binding4
-// }
-// bb7: {
-// unreachable;
-// }
-// bb8: { // binding1: Some(w) if guard()
-// StorageLive(_3);
-// _3 = ((_2 as Some).0: i32);
-// StorageLive(_8);
-// _8 = const guard() -> bb9;
-// }
-// bb9: { //end of guard
-// switchInt(move _8) -> [0u8: bb10, otherwise: bb1];
-// }
-// bb10: { // to pre_binding2
-// falseEdges -> [real: bb4, imaginary: bb4];
-// }
-// bb11: { // binding2 & arm2
-// StorageLive(_4);
-// _4 = _2;
-// _1 = const 2i32;
-// goto -> bb16;
-// }
-// bb12: { // binding3: Some(y) if guard2(y)
-// StorageLive(_5);
-// _5 = ((_2 as Some).0: i32);
-// StorageLive(_10);
-// StorageLive(_11);
-// _11 = _5;
-// _10 = const guard2(move _11) -> bb13;
-// }
-// bb13: { // end of guard2
-// StorageDead(_11);
-// switchInt(move _10) -> [0u8: bb14, otherwise: bb2];
-// }
-// bb14: { // to pre_binding4
-// falseEdges -> [real: bb6, imaginary: bb6];
-// }
-// bb15: { // binding4 & arm4
-// StorageLive(_6);
-// _6 = _2;
-// _1 = const 4i32;
-// goto -> bb16;
-// }
-// bb16: {
+// bb4: {
+// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
+// }
+// bb5: {
+// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding2
+// }
+// bb6: {
+// falseEdges -> [real: bb13, imaginary: bb7]; //pre_binding3
+// }
+// bb7: {
+// falseEdges -> [real: bb16, imaginary: bb8]; //pre_binding4
+// }
+// bb8: {
+// unreachable;
+// }
+// bb9: { // binding1: Some(w) if guard()
+// StorageLive(_3);
+// _3 = ((_2 as Some).0: i32);
+// StorageLive(_8);
+// _8 = const guard() -> [return: bb10, unwind: bb1];
+// }
+// bb10: { // end of guard
+// switchInt(move _8) -> [0u8: bb11, otherwise: bb2];
+// }
+// bb11: { // to pre_binding2
+// falseEdges -> [real: bb5, imaginary: bb5];
+// }
+// bb12: { // binding2 & arm2
+// StorageLive(_4);
+// _4 = _2;
+// _1 = const 2i32;
+// goto -> bb17;
+// }
+// bb13: { // binding3: Some(y) if guard2(y)
+// StorageLive(_5);
+// _5 = ((_2 as Some).0: i32);
+// StorageLive(_10);
+// StorageLive(_11);
+// _11 = _5;
+// _10 = const guard2(move _11) -> [return: bb14, unwind: bb1];
+// }
+// bb14: { // end of guard2
+// StorageDead(_11);
+// switchInt(move _10) -> [0u8: bb15, otherwise: bb3];
+// }
+// bb15: { // to pre_binding4
+// falseEdges -> [real: bb7, imaginary: bb7];
+// }
+// bb16: { // binding4 & arm4
+// StorageLive(_6);
+// _6 = _2;
+// _1 = const 4i32;
+// goto -> bb17;
+// }
+// bb17: {
// ...
// return;
// }
-// END rustc.main.SimplifyBranches-initial.before.mir
+// END rustc.main.QualifyAndPromoteConstants.before.mir
// | Live variables at bb0[0]: []
// StorageLive(_1);
// | Live variables at bb0[1]: []
-// _1 = const <std::boxed::Box<T>>::new(const 22usize) -> bb1;
+// _1 = const <std::boxed::Box<T>>::new(const 22usize) -> [return: bb2, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
-// | Live variables on entry to bb1: [_1 (drop)]
-// bb1: {
-// | Live variables at bb1[0]: [_1 (drop)]
+// | Live variables on entry to bb2: [_1 (drop)]
+// bb2: {
+// | Live variables at bb2[0]: [_1 (drop)]
// StorageLive(_2);
-// | Live variables at bb1[1]: [_1 (drop)]
-// _2 = const can_panic() -> [return: bb2, unwind: bb4];
+// | Live variables at bb2[1]: [_1 (drop)]
+// _2 = const can_panic() -> [return: bb3, unwind: bb4];
// }
// END rustc.main.nll.0.mir
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | Live variables on entry to bb1: []
-// bb1: {
-// | Live variables at bb1[0]: []
+// | Live variables on entry to bb2: []
+// bb2: {
+// | Live variables at bb2[0]: []
// _1 = const 55usize;
-// | Live variables at bb1[1]: [_1]
+// | Live variables at bb2[1]: [_1]
// StorageLive(_3);
-// | Live variables at bb1[2]: [_1]
+// | Live variables at bb2[2]: [_1]
// StorageLive(_4);
-// | Live variables at bb1[3]: [_1]
+// | Live variables at bb2[3]: [_1]
// _4 = _1;
-// | Live variables at bb1[4]: [_4]
-// _3 = const use_x(move _4) -> bb2;
+// | Live variables at bb2[4]: [_4]
+// _3 = const use_x(move _4) -> [return: bb3, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | Live variables on entry to bb2: [_1]
-// bb2: {
-// | Live variables at bb2[0]: [_1]
+// | Live variables on entry to bb3: [_1]
+// bb3: {
+// | Live variables at bb3[0]: [_1]
// StorageLive(_4);
-// | Live variables at bb2[1]: [_1]
+// | Live variables at bb3[1]: [_1]
// _4 = _1;
-// | Live variables at bb2[2]: [_4]
-// _3 = const make_live(move _4) -> bb4;
+// | Live variables at bb3[2]: [_4]
+// _3 = const make_live(move _4) -> [return: bb5, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
-// | Live variables on entry to bb3: []
-// bb3: {
-// | Live variables at bb3[0]: []
-// _5 = const make_dead() -> bb5;
+// | Live variables on entry to bb4: []
+// bb4: {
+// | Live variables at bb4[0]: []
+// _5 = const make_dead() -> [return: bb6, unwind: bb1];
// }
// END rustc.main.nll.0.mir
-
-
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#1r: {bb1[1], bb2[0], bb2[1]}
-// | '_#2r: {bb1[1], bb2[0], bb2[1]}
+// | '_#1r: {bb2[1], bb3[0], bb3[1]}
+// | '_#2r: {bb2[1], bb3[0], bb3[1]}
// ...
// let _2: &'_#2r usize;
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
-// bb1: {
-// | Live variables at bb1[0]: [_1, _3]
+// bb2: {
+// | Live variables at bb2[0]: [_1, _3]
// _2 = &'_#1r _1[_3];
-// | Live variables at bb1[1]: [_2]
-// switchInt(const true) -> [0u8: bb3, otherwise: bb2];
+// | Live variables at bb2[1]: [_2]
+// switchInt(const true) -> [0u8: bb4, otherwise: bb3];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
-// bb2: {
-// | Live variables at bb2[0]: [_2]
+// bb3: {
+// | Live variables at bb3[0]: [_2]
// StorageLive(_7);
-// | Live variables at bb2[1]: [_2]
+// | Live variables at bb3[1]: [_2]
// _7 = (*_2);
-// | Live variables at bb2[2]: [_7]
-// _6 = const use_x(move _7) -> bb4;
+// | Live variables at bb3[2]: [_7]
+// _6 = const use_x(move _7) -> [return: bb5, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#5r: {bb1[3], bb1[4], bb1[5], bb2[0], bb2[1]}
+// | '_#5r: {bb2[3], bb2[4], bb2[5], bb3[0], bb3[1]}
// END rustc.main.nll.0.mir
// including) the call to `use_x`. The `else` branch is not included.
// ignore-tidy-linelength
-// ignore-test #46267
// compile-flags:-Znll -Zverbose
// ^^^^^^^^^ force compiler to dump more region information
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#5r: {bb1[3], bb1[4], bb1[5], bb2[0], bb2[1], bb2[2], bb3[0], bb4[0], bb4[1], bb4[2], bb6[0], bb7[0], bb7[1], bb8[0]}
+// | '_#5r: {bb2[3], bb2[4], bb2[5], bb3[0], bb3[1], bb3[2], bb4[0], bb5[0], bb5[1], bb5[2], bb6[0], bb7[0], bb7[1], bb8[0]}
// END rustc.main.nll.0.mir
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#1r: {bb1[1], bb2[0], bb2[1]}
+// | '_#1r: {bb2[1], bb3[0], bb3[1]}
// ...
-// | '_#3r: {bb7[2], bb7[3], bb7[4]}
-// | '_#4r: {bb1[1], bb2[0], bb2[1], bb7[2], bb7[3], bb7[4]}
+// | '_#3r: {bb8[2], bb8[3], bb8[4]}
+// | '_#4r: {bb2[1], bb3[0], bb3[1], bb8[2], bb8[3], bb8[4]}
// ...
// let mut _2: &'_#4r usize;
// ...
// END RUST SOURCE
// START rustc.main.nll.0.mir
-// | '_#1r: {bb1[1], bb1[2], bb1[3], bb1[4], bb1[5], bb1[6], bb2[0], bb2[1]}
-// | '_#2r: {bb1[1], bb1[2], bb1[3], bb1[4], bb1[5], bb1[6], bb2[0], bb2[1]}
-// | '_#3r: {bb1[5], bb1[6], bb2[0], bb2[1]}
+// | '_#1r: {bb2[1], bb2[2], bb2[3], bb2[4], bb2[5], bb2[6], bb3[0], bb3[1]}
+// | '_#2r: {bb2[1], bb2[2], bb2[3], bb2[4], bb2[5], bb2[6], bb3[0], bb3[1]}
+// | '_#3r: {bb2[5], bb2[6], bb3[0], bb3[1]}
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// let _2: &'_#2r usize;
// (_1.0: Aligned) = move _4;
// StorageDead(_4);
// _0 = ();
-// drop(_1) -> bb2;
+// drop(_1) -> [return: bb2, unwind: bb1];
// }
// }
// END rustc.main.EraseRegions.before.mir
// END RUST SOURCE
// START rustc.main.SimplifyBranches-initial.before.mir
// bb0: {
-// switchInt(const false) -> [0u8: bb2, otherwise: bb1];
+// switchInt(const false) -> [0u8: bb3, otherwise: bb2];
// }
// END rustc.main.SimplifyBranches-initial.before.mir
// START rustc.main.SimplifyBranches-initial.after.mir
// bb0: {
-// goto -> bb2;
+// goto -> bb3;
// }
// END rustc.main.SimplifyBranches-initial.after.mir
// except according to those terms.
// ignore-tidy-linelength
+// ignore-wasm32-bare unwinding being disabled causes differences in output
+// ignore-wasm64-bare unwinding being disabled causes differences in output
// compile-flags: -Z verbose -Z mir-emit-validate=1
fn main() {
// StorageDead(_3);
// _0 = ();
// Validate(Release, [_1: std::boxed::Box<[i32]>]);
-// drop(_1) -> bb2;
+// drop(_1) -> [return: bb2, unwind: bb3];
// }
// ...
// }
// Validate(Acquire, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// Validate(Release, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// (*_2) = const 23i32;
+// _0 = ();
// return;
// }
// }
// ignore-wasm32-bare compiled with panic=abort by default
-#![feature(generators, generator_trait, untagged_unions)]
+#![feature(generators, generator_trait, untagged_unions, slice_patterns, advanced_slice_patterns)]
use std::cell::{Cell, RefCell};
use std::ops::Generator;
let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
}
+// Drop-order coverage: a slice pattern that binds only the FIRST element
+// of a 3-element array of allocations. The two unbound elements are
+// dropped at the end of the statement; `_x` lives to the end of the fn.
+// (assumes `Allocator::alloc` returns a drop-tracked guard — see the
+// `Allocator` definition earlier in this file)
+fn slice_pattern_first(a: &Allocator) {
+    let[_x, ..] = [a.alloc(), a.alloc(), a.alloc()];
+}
+
+// Drop-order coverage: binds only the MIDDLE element of a 3-element
+// array via a slice pattern; the first and last elements are dropped
+// immediately, `_x` at the end of the function.
+fn slice_pattern_middle(a: &Allocator) {
+    let[_, _x, _] = [a.alloc(), a.alloc(), a.alloc()];
+}
+
+// Drop-order coverage: TWO bindings in one slice pattern (first and
+// third of four elements); the two wildcard positions are dropped
+// immediately, `_x`/`_y` at the end of the function.
+fn slice_pattern_two(a: &Allocator) {
+    let[_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+// Drop-order coverage: a rest-pattern prefix (`..`) with a binding in
+// the LAST position of a 4-element array; the three leading elements
+// are dropped immediately, `_y` at the end of the function.
+fn slice_pattern_last(a: &Allocator) {
+    let[.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+// Drop-order coverage for dynamically-selected slice patterns: `i`
+// (0..=3) picks which element of a 4-element array is moved out by a
+// slice pattern; the other three elements are dropped when `array` is
+// consumed by the match arm, `_x` at the end of the function.
+// Panics with "unmatched" for any other `i` (callers only pass 0..=3).
+fn slice_pattern_one_of(a: &Allocator, i: usize) {
+    let array = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+    // Each arm moves `array` and binds exactly one element.
+    let _x = match i {
+        0 => { let [a, ..] = array; a }
+        1 => { let [_, a, ..] = array; a }
+        2 => { let [_, _, a, _] = array; a }
+        3 => { let [_, _, _, a] = array; a }
+        _ => panic!("unmatched"),
+    };
+}
+
fn run_test<F>(mut f: F)
where F: FnMut(&Allocator)
{
run_test(|a| mixed_drop_and_nondrop(a));
+ run_test(|a| slice_pattern_first(a));
+ run_test(|a| slice_pattern_middle(a));
+ run_test(|a| slice_pattern_two(a));
+ run_test(|a| slice_pattern_last(a));
+ run_test(|a| slice_pattern_one_of(a, 0));
+ run_test(|a| slice_pattern_one_of(a, 1));
+ run_test(|a| slice_pattern_one_of(a, 2));
+ run_test(|a| slice_pattern_one_of(a, 3));
+
run_test_nopanic(|a| union1(a));
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// compile-flags: -Z borrowck=compare
static mut DROP: isize = 0;
static mut DROP_S: isize = 0;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// compile-flags: -Z borrowck=compare
use std::cell::Cell;
use std::mem::swap;