/// The various "big phases" that MIR goes through.
///
+/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
+/// dialects forbid certain variants or values in certain phases.
+///
+/// Note: Each phase's validation checks all invariants of the *previous* phases' dialects. A phase
+/// that changes the dialect documents what invariants must be upheld *after* that phase finishes.
+///
/// Warning: ordering of variants is significant.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum MirPhase {
Build = 0,
+ // FIXME: it's unclear whether we still need this phase (and its corresponding query).
+ // We used to have this for pre-miri MIR based const eval.
Const = 1,
- Validated = 2,
- DropElab = 3,
- Optimized = 4,
+ /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
+ /// by creating a new MIR body per promoted element. After this phase (and thus the termination
+ /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
+ /// query.
+ ConstPromotion = 2,
+ /// After this phase
+ /// * the only `AggregateKind`s allowed are `Array` and `Generator`,
+ /// * `DropAndReplace` is gone for good
+ /// * `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop` terminator
+ /// means that the auto-generated drop glue will be invoked.
+ DropLowering = 3,
+ /// After this phase, generators are explicit state machines (no more `Yield`).
+ /// `AggregateKind::Generator` is gone for good.
+ GeneratorLowering = 4,
+ Optimized = 5,
}
impl MirPhase {
desc { |tcx| "elaborating drops for `{}`", tcx.def_path_str(key.did.to_def_id()) }
}
- query mir_validated(key: ty::WithOptConstParam<LocalDefId>) ->
+ query mir_promoted(key: ty::WithOptConstParam<LocalDefId>) ->
(
&'tcx Steal<mir::Body<'tcx>>,
&'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
cache_on_disk_if { key.is_local() }
}
+ /// The `DefId` is the `DefId` of the containing MIR body. Promoteds do not have their own
+ /// `DefId`.
query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
desc { |tcx| "optimizing promoted MIR for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
/// everything we need to re-run the query.
///
-/// Take the `mir_validated` query as an example. Like many other queries, it
+/// Take the `mir_promoted` query as an example. Like many other queries, it
/// just has a single parameter: the `DefId` of the item it will compute the
/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> &'tcx BorrowCheckResult<'tcx> {
- let (input_body, promoted) = tcx.mir_validated(def);
+ let (input_body, promoted) = tcx.mir_promoted(def);
debug!("run query mir_borrowck: {}", tcx.def_path_str(def.did.to_def_id()));
let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir as hir;
use rustc_middle::mir::interpret::InterpResult;
-use rustc_middle::ty::{self, query::TyCtxtAt, Ty};
+use rustc_middle::ty::{self, layout::TyAndLayout, query::TyCtxtAt, Ty};
+use rustc_target::abi::Size;
use rustc_ast::Mutability;
}
}
}
+
+impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// A helper function that allocates memory for the layout given and gives you access to mutate
+    /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
+    /// current `Memory` and returned.
+    ///
+    /// The returned allocation is marked immutable (`Mutability::Not`) and interned via
+    /// `intern_const_alloc`, so it is suitable for backing a by-ref constant value.
+    ///
+    /// NOTE(review): if `f` returns an error, the `?` propagates it before the allocation is
+    /// removed from `Memory`, so the temporary allocation stays behind — presumably harmless for
+    /// a short-lived const-eval `InterpCx`, but worth confirming.
+    pub(crate) fn with_temp_alloc(
+        &mut self,
+        layout: TyAndLayout<'tcx>,
+        f: impl FnOnce(
+            &mut InterpCx<'mir, 'tcx, M>,
+            MPlaceTy<'tcx, M::PointerTag>,
+        ) -> InterpResult<'tcx, ()>,
+    ) -> InterpResult<'tcx, &'tcx Allocation> {
+        // Fresh stack allocation: expected to be backed by a real pointer at offset 0,
+        // which the `assert_ptr`/`assert_eq!` below check.
+        let dest = self.allocate(layout, MemoryKind::Stack);
+        f(self, dest)?;
+        let ptr = dest.ptr.assert_ptr();
+        assert_eq!(ptr.offset, Size::ZERO);
+        // Take ownership of the backing allocation out of the interpreter's memory map
+        // (`.1` is the `Allocation` itself; `.0` is its memory kind).
+        let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
+        alloc.mutability = Mutability::Not;
+        Ok(self.tcx.intern_const_alloc(alloc))
+    }
+}
MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
};
use rustc_middle::mir::{
- AggregateKind, AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, Local,
- LocalDecl, LocalKind, Location, Operand, Place, Rvalue, SourceInfo, SourceScope,
- SourceScopeData, Statement, StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
+ AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, Local, LocalDecl, LocalKind,
+ Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement,
+ StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
use rustc_middle::ty::subst::{InternalSubsts, Subst};
use crate::const_eval::ConstEvalErr;
use crate::interpret::{
- self, compile_time_machine, truncate, AllocId, Allocation, Frame, ImmTy, Immediate, InterpCx,
- LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand as InterpOperand, PlaceTy,
- Pointer, ScalarMaybeUninit, StackPopCleanup,
+ self, compile_time_machine, truncate, AllocId, Allocation, ConstValue, Frame, ImmTy, Immediate,
+ InterpCx, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy, Operand as InterpOperand,
+ PlaceTy, Pointer, ScalarMaybeUninit, StackPopCleanup,
};
use crate::transform::{MirPass, MirSource};
));
}
Immediate::ScalarPair(
- ScalarMaybeUninit::Scalar(one),
- ScalarMaybeUninit::Scalar(two),
+ ScalarMaybeUninit::Scalar(_),
+ ScalarMaybeUninit::Scalar(_),
) => {
- // Found a value represented as a pair. For now only do cont-prop if type of
- // Rvalue is also a pair with two scalars. The more general case is more
- // complicated to implement so we'll do it later.
- // FIXME: implement the general case stated above ^.
- let ty = &value.layout.ty.kind;
+ // Found a value represented as a pair. For now only do const-prop if the type
+ // of `rvalue` is also a tuple with two scalars.
+ // FIXME: enable the general case stated above ^.
+ let ty = &value.layout.ty;
// Only do it for tuples
- if let ty::Tuple(substs) = ty {
+ if let ty::Tuple(substs) = ty.kind {
// Only do it if tuple is also a pair with two scalars
if substs.len() == 2 {
- let opt_ty1_ty2 = self.use_ecx(|this| {
+ let alloc = self.use_ecx(|this| {
let ty1 = substs[0].expect_ty();
let ty2 = substs[1].expect_ty();
let ty_is_scalar = |ty| {
== Some(true)
};
if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
- Ok(Some((ty1, ty2)))
+ let alloc = this
+ .ecx
+ .with_temp_alloc(value.layout, |ecx, dest| {
+ ecx.write_immediate_to_mplace(*imm, dest)
+ })
+ .unwrap();
+ Ok(Some(alloc))
} else {
Ok(None)
}
});
- if let Some(Some((ty1, ty2))) = opt_ty1_ty2 {
- *rval = Rvalue::Aggregate(
- Box::new(AggregateKind::Tuple),
- vec![
- self.operand_from_scalar(one, ty1, source_info.span),
- self.operand_from_scalar(two, ty2, source_info.span),
- ],
- );
+ if let Some(Some(alloc)) = alloc {
+ *rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal: self.ecx.tcx.mk_const(ty::Const {
+ ty,
+ val: ty::ConstKind::Value(ConstValue::ByRef {
+ alloc,
+ offset: Size::ZERO,
+ }),
+ }),
+ })));
}
}
}
}
+ // Scalars or scalar pairs that contain undef values are assumed to not have
+ // successfully evaluated and are thus not propagated.
_ => {}
}
}
use crate::transform::simplify;
use crate::transform::{MirPass, MirSource};
use crate::util::dump_mir;
+use crate::util::expand_aggregate;
use crate::util::storage;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
use rustc_middle::ty::GeneratorSubsts;
use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
use rustc_target::abi::VariantIdx;
}
impl TransformVisitor<'tcx> {
- // Make a GeneratorState rvalue
- fn make_state(&self, idx: VariantIdx, val: Operand<'tcx>) -> Rvalue<'tcx> {
- let adt = AggregateKind::Adt(self.state_adt_ref, idx, self.state_substs, None, None);
- Rvalue::Aggregate(box adt, vec![val])
+ // Make a GeneratorState variant assignment. `core::ops::GeneratorState` only has single
+ // element tuple variants, so we can just write to the downcasted first field and then set the
+ // discriminant to the appropriate variant.
+ fn make_state(
+ &self,
+ idx: VariantIdx,
+ val: Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> impl Iterator<Item = Statement<'tcx>> {
+ let kind = AggregateKind::Adt(self.state_adt_ref, idx, self.state_substs, None, None);
+ assert_eq!(self.state_adt_ref.variants[idx].fields.len(), 1);
+ let ty = self
+ .tcx
+ .type_of(self.state_adt_ref.variants[idx].fields[0].did)
+ .subst(self.tcx, self.state_substs);
+ expand_aggregate(
+ Place::return_place(),
+ std::iter::once((val, ty)),
+ kind,
+ source_info,
+ self.tcx,
+ )
}
// Create a Place referencing a generator struct field
if let Some((state_idx, resume, v, drop)) = ret_val {
let source_info = data.terminator().source_info;
// We must assign the value first in case it gets declared dead below
- data.statements.push(Statement {
- source_info,
- kind: StatementKind::Assign(box (
- Place::return_place(),
- self.make_state(state_idx, v),
- )),
- });
+ data.statements.extend(self.make_state(state_idx, v, source_info));
let state = if let Some((resume, resume_arg)) = resume {
// Yield
let state = 3 + self.suspension_points.len();
mir_const_qualif_const_arg: |tcx, (did, param_did)| {
mir_const_qualif(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
},
- mir_validated,
+ mir_promoted,
mir_drops_elaborated_and_const_checked,
optimized_mir,
optimized_mir_of_const_arg,
}
if validate {
- validate::Validator { when: format!("input to phase {:?}", mir_phase) }
+ validate::Validator { when: format!("input to phase {:?}", mir_phase), mir_phase }
.run_pass(tcx, source, body);
}
run_hooks(body, index, true);
if validate {
- validate::Validator { when: format!("after {} in phase {:?}", pass.name(), mir_phase) }
- .run_pass(tcx, source, body);
+ validate::Validator {
+ when: format!("after {} in phase {:?}", pass.name(), mir_phase),
+ mir_phase,
+ }
+ .run_pass(tcx, source, body);
}
index += 1;
body.phase = mir_phase;
if mir_phase == MirPhase::Optimized {
- validate::Validator { when: format!("end of phase {:?}", mir_phase) }
+ validate::Validator { when: format!("end of phase {:?}", mir_phase), mir_phase }
.run_pass(tcx, source, body);
}
}
}
// N.B., this `borrow()` is guaranteed to be valid (i.e., the value
- // cannot yet be stolen), because `mir_validated()`, which steals
+ // cannot yet be stolen), because `mir_promoted()`, which steals
// from `mir_const(), forces this query to execute before
// performing the steal.
let body = &tcx.mir_const(def).borrow();
tcx.alloc_steal_mir(body)
}
-fn mir_validated(
+fn mir_promoted(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> (&'tcx Steal<Body<'tcx>>, &'tcx Steal<IndexVec<Promoted, Body<'tcx>>>) {
if let Some(def) = def.try_upgrade(tcx) {
- return tcx.mir_validated(def);
+ return tcx.mir_promoted(def);
}
// Ensure that we compute the `mir_const_qualif` for constants at
&mut body,
InstanceDef::Item(def.to_global()),
None,
- MirPhase::Validated,
+ MirPhase::ConstPromotion,
&[promote, opt_coverage],
);
return tcx.mir_drops_elaborated_and_const_checked(def);
}
- // (Mir-)Borrowck uses `mir_validated`, so we have to force it to
+ // (Mir-)Borrowck uses `mir_promoted`, so we have to force it to
// execute before we can steal.
if let Some(param_did) = def.const_param_did {
tcx.ensure().mir_borrowck_const_arg((def.did, param_did));
tcx.ensure().mir_borrowck(def.did);
}
- let (body, _) = tcx.mir_validated(def);
+ let (body, _) = tcx.mir_promoted(def);
let mut body = body.steal();
run_post_borrowck_cleanup_passes(tcx, &mut body, def.did, None);
body,
InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
promoted,
- MirPhase::DropElab,
+ MirPhase::DropLowering,
&[post_borrowck_cleanup],
);
}
def_id: LocalDefId,
promoted: Option<Promoted>,
) {
- let optimizations: &[&dyn MirPass<'tcx>] = &[
+ let mir_opt_level = tcx.sess.opts.debugging_opts.mir_opt_level;
+
+ // Lowering generator control-flow and variables has to happen before we do anything else
+ // to them. We run some optimizations before that, because they may be harder to do on the state
+ // machine than on MIR with async primitives.
+ let optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[
&unreachable_prop::UnreachablePropagation,
&uninhabited_enum_branching::UninhabitedEnumBranching,
&simplify::SimplifyCfg::new("after-uninhabited-enum-branching"),
&inline::Inline,
- // Lowering generator control-flow and variables has to happen before we do anything else
- // to them. We do this inside the "optimizations" block so that it can benefit from
- // optimizations that run before, that might be harder to do on the state machine than MIR
- // with async primitives.
&generator::StateTransform,
+ ];
+
+ // Even if we don't do optimizations, we still have to lower generators for codegen.
+ let no_optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[&generator::StateTransform];
+
+ // The main optimizations that we do on MIR.
+ let optimizations: &[&dyn MirPass<'tcx>] = &[
&instcombine::InstCombine,
&match_branches::MatchBranchSimplification,
&const_prop::ConstProp,
&simplify::SimplifyLocals,
];
+ // Optimizations to run even if mir optimizations have been disabled.
let no_optimizations: &[&dyn MirPass<'tcx>] = &[
- // Even if we don't do optimizations, we still have to lower generators for codegen.
- &generator::StateTransform,
// FIXME(#70073): This pass is responsible for both optimization as well as some lints.
&const_prop::ConstProp,
];
+ // Some cleanup necessary at least for LLVM and potentially other codegen backends.
let pre_codegen_cleanup: &[&dyn MirPass<'tcx>] = &[
&add_call_guards::CriticalCallEdges,
// Dump the end result for testing and debugging purposes.
&dump_mir::Marker("PreCodegen"),
];
- let mir_opt_level = tcx.sess.opts.debugging_opts.mir_opt_level;
+ // End of pass declarations, now actually run the passes.
+ // Generator Lowering
+ #[rustfmt::skip]
+ run_passes(
+ tcx,
+ body,
+ InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id())),
+ promoted,
+ MirPhase::GeneratorLowering,
+ &[
+ if mir_opt_level > 0 {
+ optimizations_with_generators
+ } else {
+ no_optimizations_with_generators
+ }
+ ],
+ );
+ // Main optimization passes
#[rustfmt::skip]
run_passes(
tcx,
} else {
tcx.ensure().mir_borrowck(def.did);
}
- let (_, promoted) = tcx.mir_validated(def);
+ let (_, promoted) = tcx.mir_promoted(def);
let mut promoted = promoted.steal();
for (p, mut body) in promoted.iter_enumerated_mut() {
use rustc_middle::mir::visit::Visitor;
use rustc_middle::{
mir::{
- BasicBlock, Body, Location, Operand, Rvalue, Statement, StatementKind, Terminator,
- TerminatorKind,
+ AggregateKind, BasicBlock, Body, Location, MirPhase, Operand, Rvalue, Statement,
+ StatementKind, Terminator, TerminatorKind,
},
ty::{
self,
pub struct Validator {
/// Describes at which point in the pipeline this validation is happening.
pub when: String,
+ /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+ /// element, this validator will now emit errors if that specific element is encountered.
+ /// Note that phases that change the dialect cause all *following* phases to check the
+ /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+ /// itself.
+ pub mir_phase: MirPhase,
}
impl<'tcx> MirPass<'tcx> for Validator {
fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
let param_env = tcx.param_env(source.def_id());
- TypeChecker { when: &self.when, source, body, tcx, param_env }.visit_body(body);
+ let mir_phase = self.mir_phase;
+ TypeChecker { when: &self.when, source, body, tcx, param_env, mir_phase }.visit_body(body);
}
}
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
+ mir_phase: MirPhase,
}
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "encountered `Assign` statement with incompatible types:\n\
+ "encountered `{:?}` with incompatible types:\n\
left-hand side has type: {}\n\
right-hand side has type: {}",
- left_ty, right_ty,
+ statement.kind, left_ty, right_ty,
),
);
}
- // The sides of an assignment must not alias. Currently this just checks whether the places
- // are identical.
match rvalue {
+ // The sides of an assignment must not alias. Currently this just checks whether the places
+ // are identical.
Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) => {
if dest == src {
self.fail(
);
}
}
+ // The deaggregator currently does not deaggregate arrays.
+ // So for now, we ignore them here.
+ Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
+ // All other aggregates must be gone after some phases.
+ Rvalue::Aggregate(box kind, _) => {
+ if self.mir_phase > MirPhase::DropLowering
+ && !matches!(kind, AggregateKind::Generator(..))
+ {
+ // Generators persist until the state machine transformation, but all
+ // other aggregates must have been lowered.
+ self.fail(
+ location,
+ format!("{:?} have been lowered to field assignments", rvalue),
+ )
+ } else if self.mir_phase > MirPhase::GeneratorLowering {
+ // No more aggregates after drop and generator lowering.
+ self.fail(
+ location,
+ format!("{:?} have been lowered to field assignments", rvalue),
+ )
+ }
+ }
_ => {}
}
}
}
}
TerminatorKind::DropAndReplace { target, unwind, .. } => {
+ if self.mir_phase > MirPhase::DropLowering {
+ self.fail(
+ location,
+ "`DropAndReplace` is not permitted to exist after drop elaboration",
+ );
+ }
self.check_edge(location, *target, EdgeKind::Normal);
if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind);
}
}
TerminatorKind::Yield { resume, drop, .. } => {
+ if self.mir_phase > MirPhase::GeneratorLowering {
+ self.fail(location, "`Yield` should have been replaced by generator lowering");
+ }
self.check_edge(location, *resume, EdgeKind::Normal);
if let Some(drop) = drop {
self.check_edge(location, *drop, EdgeKind::Normal);
StorageLive(_1); // scope 0 at $DIR/checked_add.rs:5:9: 5:10
- _2 = CheckedAdd(const 1_u32, const 1_u32); // scope 0 at $DIR/checked_add.rs:5:18: 5:23
- assert(!move (_2.1: bool), "attempt to compute `{} + {}` which would overflow", const 1_u32, const 1_u32) -> bb1; // scope 0 at $DIR/checked_add.rs:5:18: 5:23
-+ _2 = (const 2_u32, const false); // scope 0 at $DIR/checked_add.rs:5:18: 5:23
++ _2 = const (2_u32, false); // scope 0 at $DIR/checked_add.rs:5:18: 5:23
++ // ty::Const
++ // + ty: (u32, bool)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [2, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/checked_add.rs:5:18: 5:23
++ // + literal: Const { ty: (u32, bool), val: Value(ByRef { alloc: Allocation { bytes: [2, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
+ assert(!const false, "attempt to compute `{} + {}` which would overflow", const 1_u32, const 1_u32) -> bb1; // scope 0 at $DIR/checked_add.rs:5:18: 5:23
}
- _3 = CheckedAdd(_2, const 1_u8); // scope 0 at $DIR/indirect.rs:5:13: 5:29
- assert(!move (_3.1: bool), "attempt to compute `{} + {}` which would overflow", move _2, const 1_u8) -> bb1; // scope 0 at $DIR/indirect.rs:5:13: 5:29
+ _2 = const 2_u8; // scope 0 at $DIR/indirect.rs:5:13: 5:25
-+ _3 = (const 3_u8, const false); // scope 0 at $DIR/indirect.rs:5:13: 5:29
++ _3 = const (3_u8, false); // scope 0 at $DIR/indirect.rs:5:13: 5:29
++ // ty::Const
++ // + ty: (u8, bool)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [3, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [3], len: Size { raw: 2 } }, size: Size { raw: 2 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/indirect.rs:5:13: 5:29
++ // + literal: Const { ty: (u8, bool), val: Value(ByRef { alloc: Allocation { bytes: [3, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [3], len: Size { raw: 2 } }, size: Size { raw: 2 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
+ assert(!const false, "attempt to compute `{} + {}` which would overflow", const 2_u8, const 1_u8) -> bb1; // scope 0 at $DIR/indirect.rs:5:13: 5:29
}
(_3.0: u8) = const 1_u8; // scope 0 at $DIR/issue-67019.rs:11:11: 11:17
(_3.1: u8) = const 2_u8; // scope 0 at $DIR/issue-67019.rs:11:11: 11:17
- (_2.0: (u8, u8)) = move _3; // scope 0 at $DIR/issue-67019.rs:11:10: 11:19
-+ (_2.0: (u8, u8)) = (const 1_u8, const 2_u8); // scope 0 at $DIR/issue-67019.rs:11:10: 11:19
++ (_2.0: (u8, u8)) = const (1_u8, 2_u8); // scope 0 at $DIR/issue-67019.rs:11:10: 11:19
++ // ty::Const
++ // + ty: (u8, u8)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [1, 2], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [3], len: Size { raw: 2 } }, size: Size { raw: 2 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/issue-67019.rs:11:10: 11:19
++ // + literal: Const { ty: (u8, u8), val: Value(ByRef { alloc: Allocation { bytes: [1, 2], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [3], len: Size { raw: 2 } }, size: Size { raw: 2 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
StorageDead(_3); // scope 0 at $DIR/issue-67019.rs:11:18: 11:19
_1 = const test(move _2) -> bb1; // scope 0 at $DIR/issue-67019.rs:11:5: 11:20
// ty::Const
(_1.1: i32) = const 99_i32; // scope 1 at $DIR/mutable_variable_aggregate.rs:6:5: 6:13
StorageLive(_2); // scope 1 at $DIR/mutable_variable_aggregate.rs:7:9: 7:10
- _2 = _1; // scope 1 at $DIR/mutable_variable_aggregate.rs:7:13: 7:14
-+ _2 = (const 42_i32, const 99_i32); // scope 1 at $DIR/mutable_variable_aggregate.rs:7:13: 7:14
++ _2 = const (42_i32, 99_i32); // scope 1 at $DIR/mutable_variable_aggregate.rs:7:13: 7:14
++ // ty::Const
++ // + ty: (i32, i32)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [42, 0, 0, 0, 99, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/mutable_variable_aggregate.rs:7:13: 7:14
++ // + literal: Const { ty: (i32, i32), val: Value(ByRef { alloc: Allocation { bytes: [42, 0, 0, 0, 99, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
_0 = const (); // scope 0 at $DIR/mutable_variable_aggregate.rs:4:11: 8:2
// ty::Const
// + ty: ()
StorageLive(_1); // scope 0 at $DIR/optimizes_into_variable.rs:12:9: 12:10
- _2 = CheckedAdd(const 2_i32, const 2_i32); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
- assert(!move (_2.1: bool), "attempt to compute `{} + {}` which would overflow", const 2_i32, const 2_i32) -> bb1; // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
-+ _2 = (const 4_i32, const false); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
++ _2 = const (4_i32, false); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
++ // ty::Const
++ // + ty: (i32, bool)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/optimizes_into_variable.rs:12:13: 12:18
++ // + literal: Const { ty: (i32, bool), val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
+ assert(!const false, "attempt to compute `{} + {}` which would overflow", const 2_i32, const 2_i32) -> bb1; // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
}
StorageLive(_1); // scope 0 at $DIR/optimizes_into_variable.rs:12:9: 12:10
- _2 = CheckedAdd(const 2_i32, const 2_i32); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
- assert(!move (_2.1: bool), "attempt to compute `{} + {}` which would overflow", const 2_i32, const 2_i32) -> bb1; // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
-+ _2 = (const 4_i32, const false); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
++ _2 = const (4_i32, false); // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
++ // ty::Const
++ // + ty: (i32, bool)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/optimizes_into_variable.rs:12:13: 12:18
++ // + literal: Const { ty: (i32, bool), val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
+ assert(!const false, "attempt to compute `{} + {}` which would overflow", const 2_i32, const 2_i32) -> bb1; // scope 0 at $DIR/optimizes_into_variable.rs:12:13: 12:18
}
bb0: {
- _1 = CheckedAdd(const 2_u32, const 2_u32); // scope 0 at $DIR/return_place.rs:6:5: 6:10
- assert(!move (_1.1: bool), "attempt to compute `{} + {}` which would overflow", const 2_u32, const 2_u32) -> bb1; // scope 0 at $DIR/return_place.rs:6:5: 6:10
-+ _1 = (const 4_u32, const false); // scope 0 at $DIR/return_place.rs:6:5: 6:10
++ _1 = const (4_u32, false); // scope 0 at $DIR/return_place.rs:6:5: 6:10
++ // ty::Const
++ // + ty: (u32, bool)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/return_place.rs:6:5: 6:10
++ // + literal: Const { ty: (u32, bool), val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [31], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
+ assert(!const false, "attempt to compute `{} + {}` which would overflow", const 2_u32, const 2_u32) -> bb1; // scope 0 at $DIR/return_place.rs:6:5: 6:10
}
StorageLive(_2); // scope 1 at $DIR/tuple_literal_propagation.rs:5:5: 5:15
StorageLive(_3); // scope 1 at $DIR/tuple_literal_propagation.rs:5:13: 5:14
- _3 = _1; // scope 1 at $DIR/tuple_literal_propagation.rs:5:13: 5:14
-+ _3 = (const 1_u32, const 2_u32); // scope 1 at $DIR/tuple_literal_propagation.rs:5:13: 5:14
++ _3 = const (1_u32, 2_u32); // scope 1 at $DIR/tuple_literal_propagation.rs:5:13: 5:14
++ // ty::Const
++ // + ty: (u32, u32)
++ // + val: Value(ByRef { alloc: Allocation { bytes: [1, 0, 0, 0, 2, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } })
++ // mir::Constant
++ // + span: $DIR/tuple_literal_propagation.rs:5:13: 5:14
++ // + literal: Const { ty: (u32, u32), val: Value(ByRef { alloc: Allocation { bytes: [1, 0, 0, 0, 2, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, size: Size { raw: 8 }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) }
_2 = const consume(move _3) -> bb1; // scope 1 at $DIR/tuple_literal_propagation.rs:5:5: 5:15
// ty::Const
// + ty: fn((u32, u32)) {consume}
-// compile-flags: -Zmir-opt-level=0 -Zvalidate-mir
+// compile-flags: -Zmir-opt-level=0
// Tests that the `<fn() as Fn>` shim does not create a `Call` terminator with a `Self` callee
// (as only `FnDef` and `FnPtr` callees are allowed in MIR).
bb2: {
StorageLive(_6); // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
StorageLive(_7); // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
- _0 = std::ops::GeneratorState::<(), ()>::Yielded(move _7); // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
+ ((_0 as Yielded).0: ()) = move _7; // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
+ discriminant(_0) = 0; // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
discriminant((*(_1.0: &mut [generator@$DIR/generator-tiny.rs:19:16: 25:6 {u8, HasDrop, ()}]))) = 3; // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
return; // scope 1 at $DIR/generator-tiny.rs:22:13: 22:18
}
rustc.args(&[
"-Zdump-mir=all",
"-Zmir-opt-level=3",
+ "-Zvalidate-mir",
"-Zdump-mir-exclude-pass-number",
]);