};
if is_consume {
let base_ty =
- mir::Place::ty_from(place_ref.local, proj_base, *self.fx.mir, cx.tcx());
+ mir::Place::ty_from(place_ref.local, proj_base, self.fx.mir, cx.tcx());
let base_ty = self.fx.monomorphize(&base_ty);
// ZSTs don't require any actual memory access.
// a loop.
fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
&self,
- mir: mir::ReadOnlyBodyAndCache<'tcx, 'tcx>,
+ mir: &'tcx mir::Body<'tcx>,
bx: &mut Bx,
targets: &[mir::BasicBlock],
) {
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
) {
- let ty = location.ty(*self.mir, bx.tcx()).ty;
+ let ty = location.ty(self.mir, bx.tcx()).ty;
let ty = self.monomorphize(&ty);
let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
let extra_args = extra_args
.iter()
.map(|op_arg| {
- let op_ty = op_arg.ty(*self.mir, bx.tcx());
+ let op_ty = op_arg.ty(self.mir, bx.tcx());
self.monomorphize(&op_ty)
})
.collect::<Vec<_>>();
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
- mir: mir::ReadOnlyBodyAndCache<'tcx, 'tcx>,
+ mir: &'tcx mir::Body<'tcx>,
debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
.collect();
let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
- let mir_body: &mir::Body<'_> = *mir;
let mut fx = FunctionCx {
instance,
mir,
let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
let mut allocate_local = |local| {
- let decl = &mir_body.local_decls[local];
+ let decl = &mir.local_decls[local];
let layout = bx.layout_of(fx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
let retptr = allocate_local(mir::RETURN_PLACE);
iter::once(retptr)
.chain(args.into_iter())
- .chain(mir_body.vars_and_temps_iter().map(allocate_local))
+ .chain(mir.vars_and_temps_iter().map(allocate_local))
.collect()
};
bx.br(fx.blocks[mir::START_BLOCK]);
}
- let rpo = traversal::reverse_postorder(&mir_body);
- let mut visited = BitSet::new_empty(mir_body.basic_blocks().len());
+ let rpo = traversal::reverse_postorder(&mir);
+ let mut visited = BitSet::new_empty(mir.basic_blocks().len());
// Codegen the body of each block using reverse postorder
for (bb, _) in rpo {
// Remove blocks that haven't been visited, or have no
// predecessors.
- for bb in mir_body.basic_blocks().indices() {
+ for bb in mir.basic_blocks().indices() {
// Unreachable block
if !visited.contains(bb.index()) {
debug!("codegen_mir: block {:?} was not visited", bb);
pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
- let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, *self.mir, tcx);
+ let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, self.mir, tcx);
self.monomorphize(&place_ty.ty)
}
}
}
mir::Rvalue::Discriminant(ref place) => {
- let discr_ty = rvalue.ty(*self.mir, bx.tcx());
+ let discr_ty = rvalue.ty(self.mir, bx.tcx());
let discr = self
.codegen_place(&mut bx, place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
- let ty = rvalue.ty(*self.mir, self.cx.tcx());
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
let operand =
OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
(bx, operand)
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
- let ty = rvalue.ty(*self.mir, self.cx.tcx());
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(&ty);
self.cx.spanned_layout_of(ty, span).is_zst()
}
use rustc_middle::middle::cstore::{ForeignModule, LinkagePreference, NativeLibrary};
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel};
use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
-use rustc_middle::mir::{self, interpret, BodyAndCache, Promoted};
+use rustc_middle::mir::{self, interpret, Body, Promoted};
use rustc_middle::ty::codec::TyDecoder;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::util::common::record_time;
!self.is_proc_macro(id) && self.root.tables.mir.get(self, id).is_some()
}
- fn get_optimized_mir(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> BodyAndCache<'tcx> {
- let mut cache = self
- .root
+ fn get_optimized_mir(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> Body<'tcx> {
+ self.root
.tables
.mir
.get(self, id)
.unwrap_or_else(|| {
bug!("get_optimized_mir: missing MIR for `{:?}`", self.local_def_id(id))
})
- .decode((self, tcx));
- cache.ensure_predecessors();
- cache
+ .decode((self, tcx))
}
- fn get_promoted_mir(
- &self,
- tcx: TyCtxt<'tcx>,
- id: DefIndex,
- ) -> IndexVec<Promoted, BodyAndCache<'tcx>> {
- let mut cache = self
- .root
+ fn get_promoted_mir(&self, tcx: TyCtxt<'tcx>, id: DefIndex) -> IndexVec<Promoted, Body<'tcx>> {
+ self.root
.tables
.promoted_mir
.get(self, id)
.unwrap_or_else(|| {
bug!("get_promoted_mir: missing MIR for `{:?}`", self.local_def_id(id))
})
- .decode((self, tcx));
- for body in cache.iter_mut() {
- body.ensure_predecessors();
- }
- cache
+ .decode((self, tcx))
}
fn mir_const_qualif(&self, id: DefIndex) -> mir::ConstQualifs {
impl<'tcx> Encoder for EncodeContext<'tcx> {
type Error = <opaque::Encoder as Encoder>::Error;
+ #[inline]
fn emit_unit(&mut self) -> Result<(), Self::Error> {
Ok(())
}
// Also, as an optimization, a missing entry indicates an empty `&[]`.
inferred_outlives: Table<DefIndex, Lazy!(&'tcx [(ty::Predicate<'tcx>, Span)])>,
super_predicates: Table<DefIndex, Lazy!(ty::GenericPredicates<'tcx>)>,
- mir: Table<DefIndex, Lazy!(mir::BodyAndCache<'tcx>)>,
- promoted_mir: Table<DefIndex, Lazy!(IndexVec<mir::Promoted, mir::BodyAndCache<'tcx>>)>,
+ mir: Table<DefIndex, Lazy!(mir::Body<'tcx>)>,
+ promoted_mir: Table<DefIndex, Lazy!(IndexVec<mir::Promoted, mir::Body<'tcx>>)>,
}
#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
[] generics: rustc_middle::ty::Generics,
[] trait_def: rustc_middle::ty::TraitDef,
[] adt_def: rustc_middle::ty::AdtDef,
- [] steal_mir: rustc_middle::ty::steal::Steal<rustc_middle::mir::BodyAndCache<$tcx>>,
- [] mir: rustc_middle::mir::BodyAndCache<$tcx>,
+ [] steal_mir: rustc_middle::ty::steal::Steal<rustc_middle::mir::Body<$tcx>>,
+ [] mir: rustc_middle::mir::Body<$tcx>,
[] steal_promoted: rustc_middle::ty::steal::Steal<
rustc_index::vec::IndexVec<
rustc_middle::mir::Promoted,
- rustc_middle::mir::BodyAndCache<$tcx>
+ rustc_middle::mir::Body<$tcx>
>
>,
[] promoted: rustc_index::vec::IndexVec<
rustc_middle::mir::Promoted,
- rustc_middle::mir::BodyAndCache<$tcx>
+ rustc_middle::mir::Body<$tcx>
>,
[decode] tables: rustc_middle::ty::TypeckTables<$tcx>,
[decode] borrowck_result: rustc_middle::mir::BorrowCheckResult<$tcx>,
+++ /dev/null
-use crate::ich::StableHashingContext;
-use crate::mir::{BasicBlock, BasicBlockData, Body, LocalDecls, Location, Successors};
-use rustc_data_structures::graph::dominators::{dominators, Dominators};
-use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_index::vec::IndexVec;
-use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-use smallvec::SmallVec;
-use std::iter;
-use std::ops::{Deref, DerefMut, Index, IndexMut};
-use std::vec::IntoIter;
-
-#[derive(Clone, Debug)]
-pub struct Cache {
- // Typically 95%+ of the inner vectors have 4 or fewer elements.
- predecessors: Option<IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>>>,
-}
-
-impl rustc_serialize::Encodable for Cache {
- fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
- Encodable::encode(&(), s)
- }
-}
-
-impl rustc_serialize::Decodable for Cache {
- fn decode<D: Decoder>(d: &mut D) -> Result<Self, D::Error> {
- Decodable::decode(d).map(|_v: ()| Self::new())
- }
-}
-
-impl<'a> HashStable<StableHashingContext<'a>> for Cache {
- fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
- // Do nothing.
- }
-}
-
-impl Cache {
- pub fn new() -> Self {
- Self { predecessors: None }
- }
-
- pub fn invalidate_predecessors(&mut self) {
- // FIXME: consider being more fine-grained
- self.predecessors = None;
- }
-
- pub fn ensure_predecessors(&mut self, body: &Body<'_>) {
- if self.predecessors.is_none() {
- let mut result = IndexVec::from_elem(smallvec![], body.basic_blocks());
- for (bb, data) in body.basic_blocks().iter_enumerated() {
- if let Some(ref term) = data.terminator {
- for &tgt in term.successors() {
- result[tgt].push(bb);
- }
- }
- }
-
- self.predecessors = Some(result)
- }
- }
-
- /// This will recompute the predecessors cache if it is not available
- fn predecessors(
- &mut self,
- body: &Body<'_>,
- ) -> &IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>> {
- self.ensure_predecessors(body);
- self.predecessors.as_ref().unwrap()
- }
-
- fn unwrap_predecessors_for(&self, bb: BasicBlock) -> &[BasicBlock] {
- &self.predecessors.as_ref().unwrap()[bb]
- }
-
- fn unwrap_predecessor_locations<'a>(
- &'a self,
- loc: Location,
- body: &'a Body<'a>,
- ) -> impl Iterator<Item = Location> + 'a {
- let if_zero_locations = if loc.statement_index == 0 {
- let predecessor_blocks = self.unwrap_predecessors_for(loc.block);
- let num_predecessor_blocks = predecessor_blocks.len();
- Some(
- (0..num_predecessor_blocks)
- .map(move |i| predecessor_blocks[i])
- .map(move |bb| body.terminator_loc(bb)),
- )
- } else {
- None
- };
-
- let if_not_zero_locations = if loc.statement_index == 0 {
- None
- } else {
- Some(Location { block: loc.block, statement_index: loc.statement_index - 1 })
- };
-
- if_zero_locations.into_iter().flatten().chain(if_not_zero_locations)
- }
-
- pub fn basic_blocks_mut<'a, 'tcx>(
- &mut self,
- body: &'a mut Body<'tcx>,
- ) -> &'a mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
- debug!("bbm: Clearing predecessors cache for body at: {:?}", body.span.data());
- self.invalidate_predecessors();
- &mut body.basic_blocks
- }
-
- pub fn basic_blocks_and_local_decls_mut<'a, 'tcx>(
- &mut self,
- body: &'a mut Body<'tcx>,
- ) -> (&'a mut IndexVec<BasicBlock, BasicBlockData<'tcx>>, &'a mut LocalDecls<'tcx>) {
- debug!("bbaldm: Clearing predecessors cache for body at: {:?}", body.span.data());
- self.invalidate_predecessors();
- (&mut body.basic_blocks, &mut body.local_decls)
- }
-}
-
-#[derive(Clone, Debug, HashStable, RustcEncodable, RustcDecodable, TypeFoldable)]
-pub struct BodyAndCache<'tcx> {
- body: Body<'tcx>,
- cache: Cache,
-}
-
-impl BodyAndCache<'tcx> {
- pub fn new(body: Body<'tcx>) -> Self {
- Self { body, cache: Cache::new() }
- }
-}
-
-#[macro_export]
-macro_rules! read_only {
- ($body:expr) => {{
- $body.ensure_predecessors();
- $body.unwrap_read_only()
- }};
-}
-
-impl BodyAndCache<'tcx> {
- pub fn ensure_predecessors(&mut self) {
- self.cache.ensure_predecessors(&self.body);
- }
-
- pub fn predecessors(&mut self) -> &IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>> {
- self.cache.predecessors(&self.body)
- }
-
- pub fn unwrap_read_only(&self) -> ReadOnlyBodyAndCache<'_, 'tcx> {
- ReadOnlyBodyAndCache::new(&self.body, &self.cache)
- }
-
- pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
- self.cache.basic_blocks_mut(&mut self.body)
- }
-
- pub fn basic_blocks_and_local_decls_mut(
- &mut self,
- ) -> (&mut IndexVec<BasicBlock, BasicBlockData<'tcx>>, &mut LocalDecls<'tcx>) {
- self.cache.basic_blocks_and_local_decls_mut(&mut self.body)
- }
-}
-
-impl<'tcx> Index<BasicBlock> for BodyAndCache<'tcx> {
- type Output = BasicBlockData<'tcx>;
-
- fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
- &self.body[index]
- }
-}
-
-impl<'tcx> IndexMut<BasicBlock> for BodyAndCache<'tcx> {
- fn index_mut(&mut self, index: BasicBlock) -> &mut Self::Output {
- &mut self.basic_blocks_mut()[index]
- }
-}
-
-impl<'tcx> Deref for BodyAndCache<'tcx> {
- type Target = Body<'tcx>;
-
- fn deref(&self) -> &Self::Target {
- &self.body
- }
-}
-
-impl<'tcx> DerefMut for BodyAndCache<'tcx> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.body
- }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct ReadOnlyBodyAndCache<'a, 'tcx> {
- body: &'a Body<'tcx>,
- cache: &'a Cache,
-}
-
-impl ReadOnlyBodyAndCache<'a, 'tcx> {
- fn new(body: &'a Body<'tcx>, cache: &'a Cache) -> Self {
- assert!(
- cache.predecessors.is_some(),
- "Cannot construct ReadOnlyBodyAndCache without computed predecessors"
- );
- Self { body, cache }
- }
-
- pub fn predecessors(&self) -> &IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>> {
- self.cache.predecessors.as_ref().unwrap()
- }
-
- pub fn predecessors_for(&self, bb: BasicBlock) -> &[BasicBlock] {
- self.cache.unwrap_predecessors_for(bb)
- }
-
- pub fn predecessor_locations(&self, loc: Location) -> impl Iterator<Item = Location> + '_ {
- self.cache.unwrap_predecessor_locations(loc, self.body)
- }
-
- pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
- &self.body.basic_blocks
- }
-
- pub fn dominators(&self) -> Dominators<BasicBlock> {
- dominators(self)
- }
-}
-
-impl graph::DirectedGraph for ReadOnlyBodyAndCache<'a, 'tcx> {
- type Node = BasicBlock;
-}
-
-impl graph::GraphPredecessors<'graph> for ReadOnlyBodyAndCache<'a, 'tcx> {
- type Item = BasicBlock;
- type Iter = IntoIter<BasicBlock>;
-}
-
-impl graph::WithPredecessors for ReadOnlyBodyAndCache<'a, 'tcx> {
- fn predecessors(&self, node: Self::Node) -> <Self as GraphPredecessors<'_>>::Iter {
- self.cache.unwrap_predecessors_for(node).to_vec().into_iter()
- }
-}
-
-impl graph::WithNumNodes for ReadOnlyBodyAndCache<'a, 'tcx> {
- fn num_nodes(&self) -> usize {
- self.body.num_nodes()
- }
-}
-
-impl graph::WithStartNode for ReadOnlyBodyAndCache<'a, 'tcx> {
- fn start_node(&self) -> Self::Node {
- self.body.start_node()
- }
-}
-
-impl graph::WithSuccessors for ReadOnlyBodyAndCache<'a, 'tcx> {
- fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
- self.body.successors(node)
- }
-}
-
-impl<'a, 'b, 'tcx> graph::GraphSuccessors<'b> for ReadOnlyBodyAndCache<'a, 'tcx> {
- type Item = BasicBlock;
- type Iter = iter::Cloned<Successors<'b>>;
-}
-
-impl Deref for ReadOnlyBodyAndCache<'a, 'tcx> {
- type Target = &'a Body<'tcx>;
-
- fn deref(&self) -> &Self::Target {
- &self.body
- }
-}
-
-CloneTypeFoldableAndLiftImpls! {
- Cache,
-}
pub use rustc_ast::ast::Mutability;
use rustc_ast::ast::Name;
use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::graph::dominators::Dominators;
+use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors};
+use rustc_data_structures::sync::MappedLockGuard;
use rustc_index::bit_set::BitMatrix;
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable;
use rustc_serialize::{Decodable, Encodable};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
+use smallvec::SmallVec;
use std::borrow::Cow;
use std::fmt::{self, Debug, Display, Formatter, Write};
-use std::ops::Index;
+use std::ops::{Index, IndexMut};
use std::slice;
use std::{iter, mem, option};
-pub use self::cache::{BodyAndCache, ReadOnlyBodyAndCache};
+use self::predecessors::{PredecessorCache, Predecessors};
pub use self::query::*;
-pub use crate::read_only;
-mod cache;
pub mod interpret;
pub mod mono;
+mod predecessors;
mod query;
pub mod tcx;
pub mod traversal;
pub yield_ty: Option<Ty<'tcx>>,
/// Generator drop glue.
- pub generator_drop: Option<Box<BodyAndCache<'tcx>>>,
+ pub generator_drop: Option<Box<Body<'tcx>>>,
/// The layout of a generator. Produced by the state transformation.
pub generator_layout: Option<GeneratorLayout<'tcx>>,
/// implementation without the flag hid this situation silently.
/// FIXME(oli-obk): rewrite the promoted during promotion to eliminate the cell components.
pub ignore_interior_mut_in_const_validation: bool,
+
+ pub predecessor_cache: PredecessorCache,
}
impl<'tcx> Body<'tcx> {
span,
ignore_interior_mut_in_const_validation: false,
control_flow_destroyed,
+ predecessor_cache: PredecessorCache::new(),
}
}
generator_kind: None,
var_debug_info: Vec::new(),
ignore_interior_mut_in_const_validation: false,
+ predecessor_cache: PredecessorCache::new(),
}
}
&self.basic_blocks
}
+    /// Returns a mutable reference to the basic blocks. Since terminators
+    /// (and thus the CFG edges) may be mutated through this reference, the
+    /// predecessor cache is invalidated up front.
+    #[inline]
+    pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+        // Because the user could mutate basic block terminators via this reference, we need to
+        // invalidate the predecessor cache.
+        //
+        // FIXME: Use a finer-grained API for this, so only transformations that alter terminators
+        // invalidate the predecessor cache.
+        self.predecessor_cache.invalidate();
+        &mut self.basic_blocks
+    }
+
+    /// Like `basic_blocks_mut`, but also hands out mutable access to the
+    /// local declarations; invalidates the predecessor cache for the same
+    /// reason (terminators may change through the returned reference).
+    #[inline]
+    pub fn basic_blocks_and_local_decls_mut(
+        &mut self,
+    ) -> (&mut IndexVec<BasicBlock, BasicBlockData<'tcx>>, &mut LocalDecls<'tcx>) {
+        self.predecessor_cache.invalidate();
+        (&mut self.basic_blocks, &mut self.local_decls)
+    }
+
/// Returns `true` if a cycle exists in the control-flow graph that is reachable from the
/// `START_BLOCK`.
pub fn is_cfg_cyclic(&self) -> bool {
}
/// Returns the return type; it always return first element from `local_decls` array.
+ #[inline]
pub fn return_ty(&self) -> Ty<'tcx> {
self.local_decls[RETURN_PLACE].ty
}
/// Gets the location of the terminator for the given block.
+ #[inline]
pub fn terminator_loc(&self, bb: BasicBlock) -> Location {
Location { block: bb, statement_index: self[bb].statements.len() }
}
+
+    /// Returns the predecessors of `bb`, computing the whole predecessor
+    /// cache on first use. The result is returned behind a lock guard
+    /// (`Deref` only); the guard must be dropped before the body can be
+    /// mutated again.
+    #[inline]
+    pub fn predecessors_for(
+        &self,
+        bb: BasicBlock,
+    ) -> impl std::ops::Deref<Target = SmallVec<[BasicBlock; 4]>> + '_ {
+        let predecessors = self.predecessor_cache.compute(&self.basic_blocks);
+        MappedLockGuard::map(predecessors, |preds| &mut preds[bb])
+    }
+
+    /// Returns the full predecessor graph for this body, computed lazily and
+    /// cached (see `PredecessorCache`).
+    #[inline]
+    pub fn predecessors(&self) -> impl std::ops::Deref<Target = Predecessors> + '_ {
+        self.predecessor_cache.compute(&self.basic_blocks)
+    }
+
+    /// Computes the dominator tree of this body's control-flow graph.
+    #[inline]
+    pub fn dominators(&self) -> Dominators<BasicBlock> {
+        dominators(self)
+    }
}
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)]
}
}
+// Mutable indexing goes through `basic_blocks_mut` (rather than the field
+// directly) so that the predecessor cache is invalidated whenever a block is
+// handed out mutably.
+impl<'tcx> IndexMut<BasicBlock> for Body<'tcx> {
+    #[inline]
+    fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> {
+        &mut self.basic_blocks_mut()[index]
+    }
+}
+
#[derive(Copy, Clone, Debug, HashStable, TypeFoldable)]
pub enum ClearCrossCrate<T> {
Clear,
}
impl<'tcx> graph::WithNumNodes for Body<'tcx> {
+ #[inline]
fn num_nodes(&self) -> usize {
self.basic_blocks.len()
}
}
impl<'tcx> graph::WithStartNode for Body<'tcx> {
+ #[inline]
fn start_node(&self) -> Self::Node {
START_BLOCK
}
}
impl<'tcx> graph::WithSuccessors for Body<'tcx> {
+ #[inline]
fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
self.basic_blocks[node].terminator().successors().cloned()
}
type Iter = iter::Cloned<Successors<'b>>;
}
+// Graph impls that let `Body` itself serve as the CFG for generic graph
+// algorithms (e.g. `dominators`), replacing `ReadOnlyBodyAndCache`.
+impl graph::GraphPredecessors<'graph> for Body<'tcx> {
+    type Item = BasicBlock;
+    type Iter = smallvec::IntoIter<[BasicBlock; 4]>;
+}
+
+impl graph::WithPredecessors for Body<'tcx> {
+    #[inline]
+    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+        // Clone the `SmallVec` so the returned iterator does not keep the
+        // predecessor cache's lock guard alive.
+        self.predecessors_for(node).clone().into_iter()
+    }
+}
+
/// `Location` represents the position of the start of the statement; or, if
/// `statement_index` equals the number of statements, then the start of the
/// terminator.
}
/// Returns `true` if `other` is earlier in the control flow graph than `self`.
- pub fn is_predecessor_of<'tcx>(
- &self,
- other: Location,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
- ) -> bool {
+ pub fn is_predecessor_of<'tcx>(&self, other: Location, body: &Body<'tcx>) -> bool {
// If we are in the same block as the other location and are an earlier statement
// then we are a predecessor of `other`.
if self.block == other.block && self.statement_index < other.statement_index {
return true;
}
+ let predecessors = body.predecessors();
+
// If we're in another block, then we want to check that block is a predecessor of `other`.
- let mut queue: Vec<BasicBlock> = body.predecessors_for(other.block).to_vec();
+ let mut queue: Vec<BasicBlock> = predecessors[other.block].to_vec();
let mut visited = FxHashSet::default();
while let Some(block) = queue.pop() {
// If we haven't visited this block before, then make sure we visit it's predecessors.
if visited.insert(block) {
- queue.extend(body.predecessors_for(block).iter().cloned());
+ queue.extend(predecessors[block].iter().cloned());
} else {
continue;
}
--- /dev/null
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{Lock, LockGuard, MappedLockGuard};
+use rustc_index::vec::IndexVec;
+use rustc_serialize as serialize;
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData};
+
+// Typically 95%+ of basic blocks have 4 or fewer predecessors.
+pub type Predecessors = IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>>;
+
+/// A lazily-computed cache of each basic block's predecessors in the CFG.
+///
+/// Stored inside `Body`; recomputed on demand after invalidation. The `Lock`
+/// lets `compute` fill the cache behind a shared (`&self`) reference.
+#[derive(Clone, Debug)]
+pub struct PredecessorCache {
+    cache: Lock<Option<Predecessors>>,
+}
+
+impl PredecessorCache {
+    /// Creates a new, empty cache.
+    #[inline]
+    pub fn new() -> Self {
+        PredecessorCache { cache: Lock::new(None) }
+    }
+
+    /// Discards the cached predecessors, forcing recomputation on the next
+    /// `compute`. Takes `&mut self`, so no guard returned by `compute` can
+    /// be outstanding while the cache is invalidated.
+    #[inline]
+    pub fn invalidate(&mut self) {
+        *self.cache.get_mut() = None;
+    }
+
+    /// Returns the predecessor graph for `basic_blocks`, computing and
+    /// caching it if it is not already present. The result is behind a lock
+    /// guard; the guard must be dropped before the cache can be invalidated.
+    #[inline]
+    pub fn compute(
+        &self,
+        basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+    ) -> MappedLockGuard<'_, Predecessors> {
+        LockGuard::map(self.cache.lock(), |cache| {
+            cache.get_or_insert_with(|| {
+                let mut preds = IndexVec::from_elem(SmallVec::new(), basic_blocks);
+                for (bb, data) in basic_blocks.iter_enumerated() {
+                    if let Some(term) = &data.terminator {
+                        for &succ in term.successors() {
+                            // `bb` jumps to `succ`, so `bb` is a predecessor of `succ`.
+                            preds[succ].push(bb);
+                        }
+                    }
+                }
+
+                preds
+            })
+        })
+    }
+}
+
+// The cache is deliberately not serialized: it encodes/decodes as a unit
+// value, and a freshly-decoded body starts with an empty cache.
+impl serialize::Encodable for PredecessorCache {
+    #[inline]
+    fn encode<S: serialize::Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+        serialize::Encodable::encode(&(), s)
+    }
+}
+
+impl serialize::Decodable for PredecessorCache {
+    #[inline]
+    fn decode<D: serialize::Decoder>(d: &mut D) -> Result<Self, D::Error> {
+        serialize::Decodable::decode(d).map(|_v: ()| Self::new())
+    }
+}
+
+// The cache is derived data and does not affect MIR semantics, so it is
+// ignored for incremental-compilation stable hashing.
+impl<CTX> HashStable<CTX> for PredecessorCache {
+    #[inline]
+    fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+        // do nothing
+    }
+}
+
+CloneTypeFoldableAndLiftImpls! {
+    PredecessorCache,
+}
// variant argument) that does not require visiting, as in
// `is_cleanup` above.
-macro_rules! body_type {
- (mut $tcx:lifetime) => {
- &mut BodyAndCache<$tcx>
- };
- ($tcx:lifetime) => {
- &Body<$tcx>
- };
-}
-
macro_rules! make_mir_visitor {
($visitor_trait_name:ident, $($mutability:ident)?) => {
pub trait $visitor_trait_name<'tcx> {
fn visit_body(
&mut self,
- body: body_type!($($mutability)? 'tcx)
+ body: &$($mutability)? Body<'tcx>,
) {
self.super_body(body);
}
fn super_body(
&mut self,
- $($mutability)? body: body_type!($($mutability)? 'tcx)
+ body: &$($mutability)? Body<'tcx>,
) {
let span = body.span;
if let Some(yield_ty) = &$($mutability)? body.yield_ty {
self.visit_basic_block_data(bb, data);
}
- let body: & $($mutability)? Body<'_> = & $($mutability)? body;
for scope in &$($mutability)? body.source_scopes {
self.visit_source_scope_data(scope);
}
fn visit_location(
&mut self,
- body: body_type!($($mutability)? 'tcx),
+ body: &$($mutability)? Body<'tcx>,
location: Location
) {
- let basic_block = & $($mutability)? body[location.block];
+ macro_rules! basic_blocks {
+ (mut) => (body.basic_blocks_mut());
+ () => (body.basic_blocks());
+ };
+ let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
if basic_block.statements.len() == location.statement_index {
if let Some(ref $($mutability)? terminator) = basic_block.terminator {
self.visit_terminator(terminator, location)
/// Fetch the MIR for a given `DefId` right after it's built - this includes
/// unreachable code.
- query mir_built(_: DefId) -> &'tcx Steal<mir::BodyAndCache<'tcx>> {
+ query mir_built(_: DefId) -> &'tcx Steal<mir::Body<'tcx>> {
desc { "building MIR for" }
}
/// ready for const evaluation.
///
/// See the README for the `mir` module for details.
- query mir_const(_: DefId) -> &'tcx Steal<mir::BodyAndCache<'tcx>> {
+ query mir_const(_: DefId) -> &'tcx Steal<mir::Body<'tcx>> {
no_hash
}
query mir_validated(_: DefId) ->
(
- &'tcx Steal<mir::BodyAndCache<'tcx>>,
- &'tcx Steal<IndexVec<mir::Promoted, mir::BodyAndCache<'tcx>>>
+ &'tcx Steal<mir::Body<'tcx>>,
+ &'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
) {
no_hash
}
/// MIR after our optimization passes have run. This is MIR that is ready
/// for codegen. This is also the only query that can fetch non-local MIR, at present.
- query optimized_mir(key: DefId) -> &'tcx mir::BodyAndCache<'tcx> {
+ query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> {
cache_on_disk_if { key.is_local() }
load_cached(tcx, id) {
- let mir: Option<crate::mir::BodyAndCache<'tcx>>
+ let mir: Option<crate::mir::Body<'tcx>>
= tcx.queries.on_disk_cache.try_load_query_result(tcx, id);
- mir.map(|x| {
- let cache = tcx.arena.alloc(x);
- cache.ensure_predecessors();
- &*cache
- })
+ mir.map(|x| &*tcx.arena.alloc(x))
}
}
- query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::BodyAndCache<'tcx>> {
+ query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
cache_on_disk_if { key.is_local() }
load_cached(tcx, id) {
let promoted: Option<
rustc_index::vec::IndexVec<
crate::mir::Promoted,
- crate::mir::BodyAndCache<'tcx>
+ crate::mir::Body<'tcx>
>> = tcx.queries.on_disk_cache.try_load_query_result(tcx, id);
- promoted.map(|p| {
- let cache = tcx.arena.alloc(p);
- for body in cache.iter_mut() {
- body.ensure_predecessors();
- }
- &*cache
- })
+ promoted.map(|p| &*tcx.arena.alloc(p))
}
}
}
/// in the case of closures, this will be redirected to the enclosing function.
query region_scope_tree(_: DefId) -> &'tcx region::ScopeTree {}
- query mir_shims(key: ty::InstanceDef<'tcx>) -> &'tcx mir::BodyAndCache<'tcx> {
+ query mir_shims(key: ty::InstanceDef<'tcx>) -> &'tcx mir::Body<'tcx> {
desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
}
use crate::middle::resolve_lifetime::{self, ObjectLifetimeDefault};
use crate::middle::stability;
use crate::mir::interpret::{Allocation, ConstValue, Scalar};
-use crate::mir::{
- interpret, BodyAndCache, Field, Local, Place, PlaceElem, ProjectionKind, Promoted,
-};
+use crate::mir::{interpret, Body, Field, Local, Place, PlaceElem, ProjectionKind, Promoted};
use crate::traits;
use crate::traits::{Clause, Clauses, Goal, GoalKind, Goals};
use crate::ty::query;
}
impl<'tcx> TyCtxt<'tcx> {
- pub fn alloc_steal_mir(self, mir: BodyAndCache<'tcx>) -> &'tcx Steal<BodyAndCache<'tcx>> {
+ pub fn alloc_steal_mir(self, mir: Body<'tcx>) -> &'tcx Steal<Body<'tcx>> {
self.arena.alloc(Steal::new(mir))
}
pub fn alloc_steal_promoted(
self,
- promoted: IndexVec<Promoted, BodyAndCache<'tcx>>,
- ) -> &'tcx Steal<IndexVec<Promoted, BodyAndCache<'tcx>>> {
+ promoted: IndexVec<Promoted, Body<'tcx>>,
+ ) -> &'tcx Steal<IndexVec<Promoted, Body<'tcx>>> {
self.arena.alloc(Steal::new(promoted))
}
pub fn intern_promoted(
self,
- promoted: IndexVec<Promoted, BodyAndCache<'tcx>>,
- ) -> &'tcx IndexVec<Promoted, BodyAndCache<'tcx>> {
+ promoted: IndexVec<Promoted, Body<'tcx>>,
+ ) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
self.arena.alloc(promoted)
}
use crate::middle::cstore::CrateStoreDyn;
use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
use crate::mir::interpret::ErrorHandled;
+use crate::mir::Body;
use crate::mir::GeneratorLayout;
-use crate::mir::ReadOnlyBodyAndCache;
use crate::traits::{self, Reveal};
use crate::ty;
use crate::ty::subst::{InternalSubsts, Subst, SubstsRef};
}
/// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair.
- pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> ReadOnlyBodyAndCache<'tcx, 'tcx> {
+ pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
match instance {
- ty::InstanceDef::Item(did) => self.optimized_mir(did).unwrap_read_only(),
+ ty::InstanceDef::Item(did) => self.optimized_mir(did),
ty::InstanceDef::VtableShim(..)
| ty::InstanceDef::ReifyShim(..)
| ty::InstanceDef::Intrinsic(..)
| ty::InstanceDef::Virtual(..)
| ty::InstanceDef::ClosureOnceShim { .. }
| ty::InstanceDef::DropGlue(..)
- | ty::InstanceDef::CloneShim(..) => self.mir_shims(instance).unwrap_read_only(),
+ | ty::InstanceDef::CloneShim(..) => self.mir_shims(instance),
}
}
{
type Error = E::Error;
+ #[inline]
fn emit_unit(&mut self) -> Result<(), Self::Error> {
Ok(())
}
use rustc_index::vec::IndexVec;
use rustc_middle::mir::traversal;
use rustc_middle::mir::visit::{MutatingUseContext, NonUseContext, PlaceContext, Visitor};
-use rustc_middle::mir::{self, Body, Local, Location, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{self, Body, Local, Location};
use rustc_middle::ty::{RegionVid, TyCtxt};
use std::fmt;
use std::ops::Index;
impl LocalsStateAtExit {
fn build(
locals_are_invalidated_at_exit: bool,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
) -> Self {
struct HasStorageDead(BitSet<Local>);
impl<'tcx> BorrowSet<'tcx> {
pub fn build(
tcx: TyCtxt<'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
locals_are_invalidated_at_exit: bool,
move_data: &MoveData<'tcx>,
) -> Self {
+use either::Either;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_hir as hir;
}
let ty =
- Place::ty_from(used_place.local, used_place.projection, *self.body, self.infcx.tcx)
+ Place::ty_from(used_place.local, used_place.projection, self.body, self.infcx.tcx)
.ty;
let needs_note = match ty.kind {
ty::Closure(id, _) => {
let mpi = self.move_data.moves[move_out_indices[0]].path;
let place = &self.move_data.move_paths[mpi].place;
- let ty = place.ty(*self.body, self.infcx.tcx).ty;
+ let ty = place.ty(self.body, self.infcx.tcx).ty;
let opt_name =
self.describe_place_with_options(place.as_ref(), IncludingDowncast(true));
let note_msg = match opt_name {
// Define a small closure that we can use to check if the type of a place
// is a union.
let union_ty = |place_base, place_projection| {
- let ty = Place::ty_from(place_base, place_projection, *self.body, self.infcx.tcx).ty;
+ let ty = Place::ty_from(place_base, place_projection, self.body, self.infcx.tcx).ty;
ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
};
}
fn get_moved_indexes(&mut self, location: Location, mpi: MovePathIndex) -> Vec<MoveSite> {
+        /// Yields the locations that immediately precede `location` in the
+        /// CFG: the terminator location of every predecessor block when at
+        /// the start of a block, otherwise the previous statement in the
+        /// same block.
+        fn predecessor_locations(
+            body: &'a mir::Body<'tcx>,
+            location: Location,
+        ) -> impl Iterator<Item = Location> + 'a {
+            if location.statement_index == 0 {
+                // Collect into a `Vec` so the returned iterator does not
+                // hold the predecessor-cache lock guard alive.
+                let predecessors = body.predecessors_for(location.block).to_vec();
+                Either::Left(predecessors.into_iter().map(move |bb| body.terminator_loc(bb)))
+            } else {
+                Either::Right(std::iter::once(Location {
+                    statement_index: location.statement_index - 1,
+                    ..location
+                }))
+            }
+        }
+
let mut stack = Vec::new();
- stack.extend(self.body.predecessor_locations(location).map(|predecessor| {
+ stack.extend(predecessor_locations(self.body, location).map(|predecessor| {
let is_back_edge = location.dominates(predecessor, &self.dominators);
(predecessor, is_back_edge)
}));
continue 'dfs;
}
- stack.extend(self.body.predecessor_locations(location).map(|predecessor| {
+ stack.extend(predecessor_locations(self.body, location).map(|predecessor| {
let back_edge = location.dominates(predecessor, &self.dominators);
(predecessor, is_back_edge || back_edge)
}));
StorageDeadOrDrop::LocalStorageDead
| StorageDeadOrDrop::BoxedStorageDead => {
assert!(
- Place::ty_from(place.local, proj_base, *self.body, tcx).ty.is_box(),
+ Place::ty_from(place.local, proj_base, self.body, tcx).ty.is_box(),
"Drop of value behind a reference or raw pointer"
);
StorageDeadOrDrop::BoxedStorageDead
StorageDeadOrDrop::Destructor(_) => base_access,
},
ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => {
- let base_ty = Place::ty_from(place.local, proj_base, *self.body, tcx).ty;
+ let base_ty = Place::ty_from(place.local, proj_base, self.body, tcx).ty;
match base_ty.kind {
ty::Adt(def, _) if def.has_dtor(tcx) => {
// Report the outermost adt with a destructor
}
ProjectionElem::Downcast(_, variant_index) => {
let base_ty =
- Place::ty_from(place.local, place.projection, *self.body, self.infcx.tcx)
- .ty;
+ Place::ty_from(place.local, place.projection, self.body, self.infcx.tcx).ty;
self.describe_field_from_ty(&base_ty, field, Some(*variant_index))
}
ProjectionElem::Field(_, field_type) => {
}) = bbd.terminator
{
if let Some(source) =
- BorrowedContentSource::from_call(func.ty(*self.body, tcx), tcx)
+ BorrowedContentSource::from_call(func.ty(self.body, tcx), tcx)
{
return source;
}
// If we didn't find an overloaded deref or index, then assume it's a
// built in deref and check the type of the base.
- let base_ty = Place::ty_from(deref_base.local, deref_base.projection, *self.body, tcx).ty;
+ let base_ty = Place::ty_from(deref_base.local, deref_base.projection, self.body, tcx).ty;
if base_ty.is_unsafe_ptr() {
BorrowedContentSource::DerefRawPointer
} else if base_ty.is_mutable_ptr() {
// Inspect the type of the content behind the
// borrow to provide feedback about why this
// was a move rather than a copy.
- let ty = deref_target_place.ty(*self.body, self.infcx.tcx).ty;
+ let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty;
let upvar_field = self
.prefixes(move_place.as_ref(), PrefixSet::All)
.find_map(|p| self.is_upvar_field_projection(p));
}
};
if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
- let def_id = match move_place.ty(*self.body, self.infcx.tcx).ty.kind {
+ let def_id = match move_place.ty(self.body, self.infcx.tcx).ty.kind {
ty::Adt(self_def, _) => self_def.did,
ty::Foreign(def_id)
| ty::FnDef(def_id, _)
}
if binds_to.is_empty() {
- let place_ty = move_from.ty(*self.body, self.infcx.tcx).ty;
+ let place_ty = move_from.ty(self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(move_from.as_ref()) {
Some(desc) => format!("`{}`", desc),
None => "value".to_string(),
// No binding. Nothing to suggest.
GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
let span = use_spans.var_or_use();
- let place_ty = original_path.ty(*self.body, self.infcx.tcx).ty;
+ let place_ty = original_path.ty(self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(original_path.as_ref()) {
Some(desc) => format!("`{}`", desc),
None => "value".to_string(),
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
- Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
));
item_msg = format!("`{}`", access_place_desc.unwrap());
Place::ty_from(
the_place_err.local,
the_place_err.projection,
- *self.body,
+ self.body,
self.infcx.tcx
)
.ty
if let Some((span, message)) = annotate_struct_field(
self.infcx.tcx,
- Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty,
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty,
field,
) {
err.span_suggestion(
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
- Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
));
err.span_label(span, format!("cannot {ACT}", ACT = act));
use rustc_data_structures::graph::dominators::Dominators;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::TerminatorKind;
-use rustc_middle::mir::{BasicBlock, Body, Location, Place, ReadOnlyBodyAndCache, Rvalue};
+use rustc_middle::mir::{BasicBlock, Body, Location, Place, Rvalue};
use rustc_middle::mir::{BorrowKind, Mutability, Operand};
use rustc_middle::mir::{Statement, StatementKind};
use rustc_middle::ty::TyCtxt;
tcx: TyCtxt<'tcx>,
all_facts: &mut Option<AllFacts>,
location_table: &LocationTable,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
borrow_set: &BorrowSet<'tcx>,
) {
if all_facts.is_none() {
body: &body,
dominators,
};
- ig.visit_body(&body);
+ ig.visit_body(body);
}
}
use rustc_index::vec::IndexVec;
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_middle::mir::{
- read_only, traversal, Body, BodyAndCache, ClearCrossCrate, Local, Location, Mutability,
- Operand, Place, PlaceElem, PlaceRef, ReadOnlyBodyAndCache,
+ traversal, Body, ClearCrossCrate, Local, Location, Mutability, Operand, Place, PlaceElem,
+ PlaceRef,
};
use rustc_middle::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
fn do_mir_borrowck<'a, 'tcx>(
infcx: &InferCtxt<'a, 'tcx>,
input_body: &Body<'tcx>,
- input_promoted: &IndexVec<Promoted, BodyAndCache<'tcx>>,
+ input_promoted: &IndexVec<Promoted, Body<'tcx>>,
def_id: DefId,
) -> BorrowCheckResult<'tcx> {
debug!("do_mir_borrowck(def_id = {:?})", def_id);
// requires first making our own copy of the MIR. This copy will
// be modified (in place) to contain non-lexical lifetimes. It
// will have a lifetime tied to the inference context.
- let body_clone: Body<'tcx> = input_body.clone();
+ let mut body = input_body.clone();
let mut promoted = input_promoted.clone();
- let mut body = BodyAndCache::new(body_clone);
let free_regions =
nll::replace_regions_in_mir(infcx, def_id, param_env, &mut body, &mut promoted);
- let body = read_only!(body); // no further changes
- let promoted: IndexVec<_, _> = promoted.iter_mut().map(|body| read_only!(body)).collect();
+ let body = &body; // no further changes
let location_table = &LocationTable::new(&body);
crate struct MirBorrowckCtxt<'cx, 'tcx> {
crate infcx: &'cx InferCtxt<'cx, 'tcx>,
- body: ReadOnlyBodyAndCache<'cx, 'tcx>,
+ body: &'cx Body<'tcx>,
mir_def_id: DefId,
move_data: &'cx MoveData<'tcx>,
let tcx = self.infcx.tcx;
// Compute the type with accurate region information.
- let drop_place_ty = drop_place.ty(*self.body, self.infcx.tcx);
+ let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
fn body(&self) -> &'cx Body<'tcx> {
- *self.body
+ self.body
}
/// Checks an access to the given place to see if it is allowed. Examines the set of borrows
let mut error_reported = false;
let tcx = self.infcx.tcx;
let body = self.body;
- let body: &Body<'_> = &body;
let borrow_set = self.borrow_set.clone();
// Use polonius output if it has been enabled.
use rustc_index::vec::IndexVec;
use rustc_infer::infer::InferCtxt;
use rustc_middle::mir::{
- BasicBlock, Body, BodyAndCache, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind,
- Location, Promoted, ReadOnlyBodyAndCache,
+ BasicBlock, Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location,
+ Promoted,
};
use rustc_middle::ty::{self, RegionKind, RegionVid};
use rustc_span::symbol::sym;
infcx: &InferCtxt<'cx, 'tcx>,
def_id: DefId,
param_env: ty::ParamEnv<'tcx>,
- body: &mut BodyAndCache<'tcx>,
- promoted: &mut IndexVec<Promoted, BodyAndCache<'tcx>>,
+ body: &mut Body<'tcx>,
+ promoted: &mut IndexVec<Promoted, Body<'tcx>>,
) -> UniversalRegions<'tcx> {
debug!("replace_regions_in_mir(def_id={:?})", def_id);
infcx: &InferCtxt<'cx, 'tcx>,
def_id: DefId,
universal_regions: UniversalRegions<'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
- promoted: &IndexVec<Promoted, ReadOnlyBodyAndCache<'_, 'tcx>>,
+ body: &Body<'tcx>,
+ promoted: &IndexVec<Promoted, Body<'tcx>>,
location_table: &LocationTable,
param_env: ty::ParamEnv<'tcx>,
flow_inits: &mut ResultsCursor<'cx, 'tcx, MaybeInitializedPlaces<'cx, 'tcx>>,
use super::MirBorrowckCtxt;
use rustc_hir as hir;
-use rustc_middle::mir::{Place, PlaceRef, ProjectionElem, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{Body, Place, PlaceRef, ProjectionElem};
use rustc_middle::ty::{self, TyCtxt};
pub trait IsPrefixOf<'tcx> {
}
pub(super) struct Prefixes<'cx, 'tcx> {
- body: ReadOnlyBodyAndCache<'cx, 'tcx>,
+ body: &'cx Body<'tcx>,
tcx: TyCtxt<'tcx>,
kind: PrefixSet,
next: Option<PlaceRef<'tcx>>,
// derefs, except we stop at the deref of a shared
// reference.
- let ty = Place::ty_from(cursor.local, proj_base, *self.body, self.tcx).ty;
+ let ty = Place::ty_from(cursor.local, proj_base, self.body, self.tcx).ty;
match ty.kind {
ty::RawPtr(_) | ty::Ref(_ /*rgn*/, _ /*ty*/, hir::Mutability::Not) => {
// don't continue traversing over derefs of raw pointers or shared
use rustc_index::bit_set::{HybridBitSet, SparseBitMatrix};
use rustc_index::vec::Idx;
use rustc_index::vec::IndexVec;
-use rustc_middle::mir::{BasicBlock, Body, Location, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{BasicBlock, Body, Location};
use rustc_middle::ty::{self, RegionVid};
use std::fmt::Debug;
use std::rc::Rc;
/// Pushes all predecessors of `index` onto `stack`.
crate fn push_predecessors(
&self,
- body: ReadOnlyBodyAndCache<'_, '_>,
+ body: &Body<'_>,
index: PointIndex,
stack: &mut Vec<PointIndex>,
) {
use rustc_index::vec::IndexVec;
use rustc_infer::infer::{InferCtxt, NLLRegionVariableOrigin};
use rustc_middle::mir::visit::{MutVisitor, TyContext};
-use rustc_middle::mir::{BodyAndCache, Location, PlaceElem, Promoted};
+use rustc_middle::mir::{Body, Location, PlaceElem, Promoted};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
/// inference variables, returning the number of variables created.
pub fn renumber_mir<'tcx>(
infcx: &InferCtxt<'_, 'tcx>,
- body: &mut BodyAndCache<'tcx>,
- promoted: &mut IndexVec<Promoted, BodyAndCache<'tcx>>,
+ body: &mut Body<'tcx>,
+ promoted: &mut IndexVec<Promoted, Body<'tcx>>,
) {
debug!("renumber_mir()");
debug!("renumber_mir: body.arg_count={:?}", body.arg_count);
use rustc_data_structures::vec_linked_list as vll;
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::{PlaceContext, Visitor};
-use rustc_middle::mir::{Local, Location, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{Body, Local, Location};
use crate::util::liveness::{categorize, DefUse};
crate fn build(
live_locals: &Vec<Local>,
elements: &RegionValueElements,
- body: ReadOnlyBodyAndCache<'_, '_>,
+ body: &Body<'_>,
) -> Self {
let nones = IndexVec::from_elem_n(None, body.local_decls.len());
let mut local_use_map = LocalUseMap {
use rustc_data_structures::fx::FxHashSet;
-use rustc_middle::mir::{Body, Local, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{Body, Local};
use rustc_middle::ty::{RegionVid, TyCtxt};
use std::rc::Rc;
/// performed before
pub(super) fn generate<'mir, 'tcx>(
typeck: &mut TypeChecker<'_, 'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
elements: &Rc<RegionValueElements>,
flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
move_data: &MoveData<'tcx>,
use crate::dataflow::move_paths::{LookupResult, MoveData};
use crate::util::liveness::{categorize, DefUse};
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
-use rustc_middle::mir::{Local, Location, Place, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{Body, Local, Location, Place};
use rustc_middle::ty::subst::GenericArg;
use super::TypeChecker;
pub(super) fn populate_access_facts(
typeck: &mut TypeChecker<'_, 'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
location_table: &LocationTable,
move_data: &MoveData<'_>,
dropped_at: &mut Vec<(Local, Location)>,
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::bit_set::HybridBitSet;
use rustc_infer::infer::canonical::QueryRegionConstraints;
-use rustc_middle::mir::{BasicBlock, ConstraintCategory, Local, Location, ReadOnlyBodyAndCache};
+use rustc_middle::mir::{BasicBlock, Body, ConstraintCategory, Local, Location};
use rustc_middle::ty::{Ty, TypeFoldable};
use rustc_trait_selection::traits::query::dropck_outlives::DropckOutlivesResult;
use rustc_trait_selection::traits::query::type_op::outlives::DropckOutlives;
/// this respects `#[may_dangle]` annotations).
pub(super) fn trace(
typeck: &mut TypeChecker<'_, 'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
elements: &Rc<RegionValueElements>,
flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
move_data: &MoveData<'tcx>,
elements: &'me RegionValueElements,
/// MIR we are analyzing.
- body: ReadOnlyBodyAndCache<'me, 'tcx>,
+ body: &'me Body<'tcx>,
/// Mapping to/from the various indices used for initialization tracking.
move_data: &'me MoveData<'tcx>,
pub(crate) fn type_check<'mir, 'tcx>(
infcx: &InferCtxt<'_, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
- promoted: &IndexVec<Promoted, ReadOnlyBodyAndCache<'_, 'tcx>>,
+ body: &Body<'tcx>,
+ promoted: &IndexVec<Promoted, Body<'tcx>>,
mir_def_id: DefId,
universal_regions: &Rc<UniversalRegions<'tcx>>,
location_table: &LocationTable,
infcx: &'a InferCtxt<'a, 'tcx>,
mir_def_id: DefId,
param_env: ty::ParamEnv<'tcx>,
- body: ReadOnlyBodyAndCache<'a, 'tcx>,
- promoted: &'a IndexVec<Promoted, ReadOnlyBodyAndCache<'_, 'tcx>>,
+ body: &'a Body<'tcx>,
+ promoted: &'a IndexVec<Promoted, Body<'tcx>>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
implicit_region_bound: ty::Region<'tcx>,
borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
) -> R {
let mut checker = TypeChecker::new(
infcx,
- *body,
+ body,
mir_def_id,
param_env,
region_bound_pairs,
universal_region_relations,
);
let errors_reported = {
- let mut verifier = TypeVerifier::new(&mut checker, *body, promoted);
+ let mut verifier = TypeVerifier::new(&mut checker, body, promoted);
verifier.visit_body(&body);
verifier.errors_reported
};
struct TypeVerifier<'a, 'b, 'tcx> {
cx: &'a mut TypeChecker<'b, 'tcx>,
body: &'b Body<'tcx>,
- promoted: &'b IndexVec<Promoted, ReadOnlyBodyAndCache<'b, 'tcx>>,
+ promoted: &'b IndexVec<Promoted, Body<'tcx>>,
last_span: Span,
mir_def_id: DefId,
errors_reported: bool,
if let ty::ConstKind::Unevaluated(def_id, substs, promoted) = constant.literal.val {
if let Some(promoted) = promoted {
let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>,
- promoted: &ReadOnlyBodyAndCache<'_, 'tcx>,
+ promoted: &Body<'tcx>,
ty,
san_ty| {
if let Err(terr) = verifier.cx.eq_types(
};
if !self.errors_reported {
- let promoted_body = self.promoted[promoted];
+ let promoted_body = &self.promoted[promoted];
self.sanitize_promoted(promoted_body, location);
let promoted_ty = promoted_body.return_ty();
- check_err(self, &promoted_body, ty, promoted_ty);
+ check_err(self, promoted_body, ty, promoted_ty);
}
} else {
if let Err(terr) = self.cx.fully_perform_op(
fn new(
cx: &'a mut TypeChecker<'b, 'tcx>,
body: &'b Body<'tcx>,
- promoted: &'b IndexVec<Promoted, ReadOnlyBodyAndCache<'b, 'tcx>>,
+ promoted: &'b IndexVec<Promoted, Body<'tcx>>,
) -> Self {
TypeVerifier {
body,
place_ty
}
- fn sanitize_promoted(
- &mut self,
- promoted_body: ReadOnlyBodyAndCache<'b, 'tcx>,
- location: Location,
- ) {
+ fn sanitize_promoted(&mut self, promoted_body: &'b Body<'tcx>, location: Location) {
// Determine the constraints from the promoted MIR by running the type
// checker on the promoted MIR, then transfer the constraints back to
// the main MIR, changing the locations to the provided location.
- let parent_body = mem::replace(&mut self.body, *promoted_body);
+ let parent_body = mem::replace(&mut self.body, promoted_body);
// Use new sets of constraints and closure bounds so that we can
// modify their locations.
self.infcx.tcx
}
- fn check_stmt(
- &mut self,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
- stmt: &Statement<'tcx>,
- location: Location,
- ) {
+ fn check_stmt(&mut self, body: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) {
debug!("check_stmt: {:?}", stmt);
let tcx = self.tcx();
match stmt.kind {
_ => ConstraintCategory::Assignment,
};
- let place_ty = place.ty(*body, tcx).ty;
+ let place_ty = place.ty(body, tcx).ty;
let place_ty = self.normalize(place_ty, location);
- let rv_ty = rv.ty(*body, tcx);
+ let rv_ty = rv.ty(body, tcx);
let rv_ty = self.normalize(rv_ty, location);
if let Err(terr) =
self.sub_types_or_anon(rv_ty, place_ty, location.to_locations(), category)
}
}
StatementKind::SetDiscriminant { ref place, variant_index } => {
- let place_type = place.ty(*body, tcx).ty;
+ let place_type = place.ty(body, tcx).ty;
let adt = match place_type.kind {
ty::Adt(adt, _) if adt.is_enum() => adt,
_ => {
};
}
StatementKind::AscribeUserType(box (ref place, ref projection), variance) => {
- let place_ty = place.ty(*body, tcx).ty;
+ let place_ty = place.ty(body, tcx).ty;
if let Err(terr) = self.relate_type_and_user_type(
place_ty,
variance,
}
}
- fn check_rvalue(
- &mut self,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
- rvalue: &Rvalue<'tcx>,
- location: Location,
- ) {
+ fn check_rvalue(&mut self, body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
let tcx = self.tcx();
match rvalue {
// While this is located in `nll::typeck` this error is not an NLL error, it's
// a required check to make sure that repeated elements implement `Copy`.
let span = body.source_info(location).span;
- let ty = operand.ty(*body, tcx);
+ let ty = operand.ty(body, tcx);
if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) {
// To determine if `const_in_array_repeat_expressions` feature gate should
// be mentioned, need to check if the rvalue is promotable.
Rvalue::Cast(cast_kind, op, ty) => {
match cast_kind {
CastKind::Pointer(PointerCast::ReifyFnPointer) => {
- let fn_sig = op.ty(*body, tcx).fn_sig(tcx);
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
}
CastKind::Pointer(PointerCast::ClosureFnPointer(unsafety)) => {
- let sig = match op.ty(*body, tcx).kind {
+ let sig = match op.ty(body, tcx).kind {
ty::Closure(_, substs) => substs.as_closure().sig(),
_ => bug!(),
};
}
CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
- let fn_sig = op.ty(*body, tcx).fn_sig(tcx);
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
let &ty = ty;
let trait_ref = ty::TraitRef {
def_id: tcx.lang_items().coerce_unsized_trait().unwrap(),
- substs: tcx.mk_substs_trait(op.ty(*body, tcx), &[ty.into()]),
+ substs: tcx.mk_substs_trait(op.ty(body, tcx), &[ty.into()]),
};
self.prove_trait_ref(
}
CastKind::Pointer(PointerCast::MutToConstPointer) => {
- let ty_from = match op.ty(*body, tcx).kind {
+ let ty_from = match op.ty(body, tcx).kind {
ty::RawPtr(ty::TypeAndMut {
ty: ty_from,
mutbl: hir::Mutability::Mut,
}
CastKind::Pointer(PointerCast::ArrayToPointer) => {
- let ty_from = op.ty(*body, tcx);
+ let ty_from = op.ty(body, tcx);
let opt_ty_elem = match ty_from.kind {
ty::RawPtr(ty::TypeAndMut {
}
CastKind::Misc => {
- let ty_from = op.ty(*body, tcx);
+ let ty_from = op.ty(body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(ty);
match (cast_ty_from, cast_ty_to) {
left,
right,
) => {
- let ty_left = left.ty(*body, tcx);
+ let ty_left = left.ty(body, tcx);
if let ty::RawPtr(_) | ty::FnPtr(_) = ty_left.kind {
- let ty_right = right.ty(*body, tcx);
+ let ty_right = right.ty(body, tcx);
let common_ty = self.infcx.next_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::MiscVariable,
span: body.source_info(location).span,
})
}
- fn typeck_mir(&mut self, body: ReadOnlyBodyAndCache<'_, 'tcx>) {
+ fn typeck_mir(&mut self, body: &Body<'tcx>) {
self.last_span = body.span;
debug!("run_on_mir: {:?}", body.span);
}
// This is a const fn. Call it.
Ok(Some(match ecx.load_mir(instance.def, None) {
- Ok(body) => *body,
+ Ok(body) => body,
Err(err) => {
if let err_unsup!(NoMirFor(did)) = err.kind {
let path = ecx.tcx.def_path_str(did);
/// Dataflow analysis that determines whether each local requires storage at a
/// given location; i.e. whether its storage can go away without being observed.
pub struct MaybeRequiresStorage<'mir, 'tcx> {
- body: ReadOnlyBodyAndCache<'mir, 'tcx>,
+ body: &'mir Body<'tcx>,
borrowed_locals: RefCell<BorrowedLocalsResults<'mir, 'tcx>>,
}
impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> {
pub fn new(
- body: ReadOnlyBodyAndCache<'mir, 'tcx>,
+ body: &'mir Body<'tcx>,
borrowed_locals: &'mir Results<'tcx, MaybeBorrowedLocals>,
) -> Self {
MaybeRequiresStorage {
&self,
instance: ty::InstanceDef<'tcx>,
promoted: Option<mir::Promoted>,
- ) -> InterpResult<'tcx, mir::ReadOnlyBodyAndCache<'tcx, 'tcx>> {
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
// do not continue if typeck errors occurred (can only occur in local crate)
let did = instance.def_id();
if did.is_local() && self.tcx.has_typeck_tables(did) {
}
trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
if let Some(promoted) = promoted {
- return Ok(self.tcx.promoted_mir(did)[promoted].unwrap_read_only());
+ return Ok(&self.tcx.promoted_mir(did)[promoted]);
}
match instance {
ty::InstanceDef::Item(def_id) => {
if self.tcx.is_mir_available(did) {
- Ok(self.tcx.optimized_mir(did).unwrap_read_only())
+ Ok(self.tcx.optimized_mir(did))
} else {
throw_unsup!(NoMirFor(def_id))
}
providers.mir_shims = make_shim;
}
-fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx BodyAndCache<'tcx> {
+fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
debug!("make_shim({:?})", instance);
let mut result = match instance {
debug!("make_shim({:?}) = {:?}", instance, result);
- result.ensure_predecessors();
tcx.arena.alloc(result)
}
.collect()
}
-fn build_drop_shim<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- ty: Option<Ty<'tcx>>,
-) -> BodyAndCache<'tcx> {
+fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
- let body = new_body(blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
-
- let mut body = BodyAndCache::new(body);
+ let mut body = new_body(blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
if let Some(..) = ty {
// The first argument (index 0), but add 1 for the return value.
}
/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
-fn build_clone_shim<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- self_ty: Ty<'tcx>,
-) -> BodyAndCache<'tcx> {
+fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
debug!("build_clone_shim(def_id={:?})", def_id);
let param_env = tcx.param_env(def_id);
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
- BodyAndCache::new(builder.into_mir())
+ builder.into_mir()
}
struct CloneShimBuilder<'tcx> {
rcvr_adjustment: Option<Adjustment>,
call_kind: CallKind,
untuple_args: Option<&[Ty<'tcx>]>,
-) -> BodyAndCache<'tcx> {
+) -> Body<'tcx> {
debug!(
"build_call_shim(instance={:?}, rcvr_adjustment={:?}, \
call_kind={:?}, untuple_args={:?})",
if let Abi::RustCall = sig.abi {
body.spread_arg = Some(Local::new(sig.inputs().len()));
}
- BodyAndCache::new(body)
+
+ body
}
-pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> &BodyAndCache<'_> {
+pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> &Body<'_> {
debug_assert!(tcx.is_constructor(ctor_id));
let span =
|_, _| Ok(()),
);
- let mut body = BodyAndCache::new(body);
- body.ensure_predecessors();
tcx.arena.alloc(body)
}
*/
impl<'tcx> MirPass<'tcx> for AddCallGuards {
- fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
self.add_call_guards(body);
}
}
impl AddCallGuards {
- pub fn add_call_guards(&self, body: &mut BodyAndCache<'_>) {
+ pub fn add_call_guards(&self, body: &mut Body<'_>) {
let pred_count: IndexVec<_, _> = body.predecessors().iter().map(|ps| ps.len()).collect();
// We need a place to store the new blocks generated
pub struct AddMovesForPackedDrops;
impl<'tcx> MirPass<'tcx> for AddMovesForPackedDrops {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
debug!("add_moves_for_packed_drops({:?} @ {:?})", src, body.span);
add_moves_for_packed_drops(tcx, body, src.def_id());
}
}
-pub fn add_moves_for_packed_drops<'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &mut BodyAndCache<'tcx>,
- def_id: DefId,
-) {
+pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, def_id: DefId) {
let patch = add_moves_for_packed_drops_patch(tcx, body, def_id);
patch.apply(body);
}
}
impl<'tcx> MirPass<'tcx> for AddRetag {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
if !tcx.sess.opts.debugging_opts.mir_emit_retag {
return;
}
/// Information about the item currently being const-checked, as well as a reference to the global
/// context.
pub struct Item<'mir, 'tcx> {
- pub body: mir::ReadOnlyBodyAndCache<'mir, 'tcx>,
+ pub body: &'mir mir::Body<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub def_id: DefId,
pub param_env: ty::ParamEnv<'tcx>,
}
impl Item<'mir, 'tcx> {
- pub fn new(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- body: mir::ReadOnlyBodyAndCache<'mir, 'tcx>,
- ) -> Self {
+ pub fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &'mir mir::Body<'tcx>) -> Self {
let param_env = tcx.param_env(def_id);
let const_kind = ConstKind::for_item(tcx, def_id);
F: FnMut(Local) -> bool,
{
match rvalue {
- Rvalue::NullaryOp(..) => Q::in_any_value_of_ty(cx, rvalue.ty(*cx.body, cx.tcx)),
+ Rvalue::NullaryOp(..) => Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)),
Rvalue::Discriminant(place) | Rvalue::Len(place) => {
in_place::<Q, _>(cx, in_local, place.as_ref())
Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
// Special-case reborrows to be more like a copy of the reference.
if let &[ref proj_base @ .., ProjectionElem::Deref] = place.projection.as_ref() {
- let base_ty = Place::ty_from(place.local, proj_base, *cx.body, cx.tcx).ty;
+ let base_ty = Place::ty_from(place.local, proj_base, cx.body, cx.tcx).ty;
if let ty::Ref(..) = base_ty.kind {
return in_place::<Q, _>(
cx,
| ProjectionElem::Index(_) => {}
}
- let base_ty = Place::ty_from(place.local, proj_base, *cx.body, cx.tcx);
+ let base_ty = Place::ty_from(place.local, proj_base, cx.body, cx.tcx);
let proj_ty = base_ty.projection_ty(cx.tcx, proj_elem).ty;
if !Q::in_any_value_of_ty(cx, proj_ty) {
return false;
) {
// We cannot reason about another function's internals, so use conservative type-based
// qualification for the result of a function call.
- let return_ty = return_place.ty(*self.item.body, self.item.tcx).ty;
+ let return_ty = return_place.ty(self.item.body, self.item.tcx).ty;
let qualif = Q::in_any_value_of_ty(self.item, return_ty);
if !return_place.is_indirect() {
impl<Q: Qualif> QualifCursor<'a, 'mir, 'tcx, Q> {
pub fn new(q: Q, item: &'a Item<'mir, 'tcx>) -> Self {
let cursor = FlowSensitiveAnalysis::new(q, item)
- .into_engine(item.tcx, &item.body, item.def_id)
+ .into_engine(item.tcx, item.body, item.def_id)
.iterate_to_fixpoint()
- .into_results_cursor(*item.body);
+ .into_results_cursor(item.body);
let mut in_any_value_of_ty = BitSet::new_empty(item.body.local_decls.len());
for (local, decl) in item.body.local_decls.iter_enumerated() {
//
// FIXME(ecstaticmorse): Someday we want to allow custom drop impls. How do we do this
// without breaking stable code?
- let indirectly_mutable = MaybeMutBorrowedLocals::mut_borrows_only(tcx, *body, param_env)
+ let indirectly_mutable = MaybeMutBorrowedLocals::mut_borrows_only(tcx, body, param_env)
.unsound_ignore_borrow_on_drop()
- .into_engine(tcx, *body, def_id)
+ .into_engine(tcx, body, def_id)
.iterate_to_fixpoint()
- .into_results_cursor(*body);
+ .into_results_cursor(body);
let qualifs = Qualifs { needs_drop, has_mut_interior, indirectly_mutable };
// Special-case reborrows to be more like a copy of a reference.
match *rvalue {
Rvalue::Ref(_, kind, place) => {
- if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, *self.body, place) {
+ if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
let ctx = match kind {
BorrowKind::Shared => {
PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
}
}
Rvalue::AddressOf(mutbl, place) => {
- if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, *self.body, place) {
+ if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, self.body, place) {
let ctx = match mutbl {
Mutability::Not => {
PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
| Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
- let ty = place.ty(*self.body, self.tcx).ty;
+ let ty = place.ty(self.body, self.tcx).ty;
let is_allowed = match ty.kind {
// Inside a `static mut`, `&mut [...]` is allowed.
ty::Array(..) | ty::Slice(_) if self.const_kind() == ConstKind::StaticMut => {
}
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => {
- let operand_ty = operand.ty(*self.body, self.tcx);
+ let operand_ty = operand.ty(self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
}
Rvalue::BinaryOp(op, ref lhs, _) => {
- if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(*self.body, self.tcx).kind {
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).kind {
assert!(
op == BinOp::Eq
|| op == BinOp::Ne
match elem {
ProjectionElem::Deref => {
- let base_ty = Place::ty_from(place_local, proj_base, *self.body, self.tcx).ty;
+ let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
if let ty::RawPtr(_) = base_ty.kind {
if proj_base.is_empty() {
if let (local, []) = (place_local, proj_base) {
| ProjectionElem::Subslice { .. }
| ProjectionElem::Field(..)
| ProjectionElem::Index(_) => {
- let base_ty = Place::ty_from(place_local, proj_base, *self.body, self.tcx).ty;
+ let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
match base_ty.ty_adt_def() {
Some(def) if def.is_union() => {
self.check_op(ops::UnionAccess);
match &terminator.kind {
TerminatorKind::Call { func, .. } => {
- let fn_ty = func.ty(*self.body, self.tcx);
+ let fn_ty = func.ty(self.body, self.tcx);
let (def_id, substs) = match fn_ty.kind {
ty::FnDef(def_id, substs) => (def_id, substs),
// Check to see if the type of this place can ever have a drop impl. If not, this
// `Drop` terminator is frivolous.
let ty_needs_drop =
- dropped_place.ty(*self.body, self.tcx).ty.needs_drop(self.tcx, self.param_env);
+ dropped_place.ty(self.body, self.tcx).ty.needs_drop(self.tcx, self.param_env);
if !ty_needs_drop {
return;
hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => (true, false),
};
let mut checker = UnsafetyChecker::new(const_context, min_const_fn, body, tcx, param_env);
- // mir_built ensures that body has a computed cache, so we don't (and can't) attempt to
- // recompute it here.
- let body = body.unwrap_read_only();
checker.visit_body(&body);
check_unused_unsafe(tcx, def_id, &checker.used_unsafe, &mut checker.inherited_blocks);
use crate::transform::{MirPass, MirSource};
use rustc_middle::mir::visit::MutVisitor;
-use rustc_middle::mir::{BodyAndCache, BorrowKind, Location, Rvalue};
+use rustc_middle::mir::{Body, BorrowKind, Location, Rvalue};
use rustc_middle::mir::{Statement, StatementKind};
use rustc_middle::ty::TyCtxt;
}
impl<'tcx> MirPass<'tcx> for CleanupNonCodegenStatements {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
let mut delete = DeleteNonCodegenStatements { tcx };
delete.visit_body(body);
body.user_type_annotations.raw.clear();
MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
};
use rustc_middle::mir::{
- read_only, AggregateKind, AssertKind, BasicBlock, BinOp, Body, BodyAndCache, ClearCrossCrate,
- Constant, Local, LocalDecl, LocalKind, Location, Operand, Place, ReadOnlyBodyAndCache, Rvalue,
- SourceInfo, SourceScope, SourceScopeData, Statement, StatementKind, Terminator, TerminatorKind,
- UnOp, RETURN_PLACE,
+ AggregateKind, AssertKind, BasicBlock, BinOp, Body, ClearCrossCrate, Constant, Local,
+ LocalDecl, LocalKind, Location, Operand, Place, Rvalue, SourceInfo, SourceScope,
+ SourceScopeData, Statement, StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutError, TyAndLayout};
use rustc_middle::ty::subst::{InternalSubsts, Subst};
pub struct ConstProp;
impl<'tcx> MirPass<'tcx> for ConstProp {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
// will be evaluated by miri and produce its errors there
if source.promoted.is_some() {
return;
// constants, instead of just checking for const-folding succeeding.
// That would require an uniform one-def no-mutation analysis
// and RPO (or recursing when needing the value of a local).
- let mut optimization_finder =
- ConstPropagator::new(read_only!(body), dummy_body, tcx, source);
+ let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx, source);
optimization_finder.visit_body(body);
trace!("ConstProp done for {:?}", source.def_id());
impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn new(
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
dummy_body: &'mir Body<'tcx>,
tcx: TyCtxt<'tcx>,
source: MirSource<'tcx>,
impl CanConstProp {
/// returns true if `local` can be propagated
- fn check(body: ReadOnlyBodyAndCache<'_, '_>) -> IndexVec<Local, ConstPropMode> {
+ fn check(body: &Body<'_>) -> IndexVec<Local, ConstPropMode> {
let mut cpv = CanConstProp {
can_const_prop: IndexVec::from_elem(ConstPropMode::FullConstProp, &body.local_decls),
found_assignment: IndexVec::from_elem(false, &body.local_decls),
use crate::util::def_use::DefUseAnalysis;
use rustc_middle::mir::visit::MutVisitor;
use rustc_middle::mir::{
- read_only, Body, BodyAndCache, Constant, Local, LocalKind, Location, Operand, Place, Rvalue,
- StatementKind,
+ Body, Constant, Local, LocalKind, Location, Operand, Place, Rvalue, StatementKind,
};
use rustc_middle::ty::TyCtxt;
pub struct CopyPropagation;
impl<'tcx> MirPass<'tcx> for CopyPropagation {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
// We only run when the MIR optimization level is > 1.
// This avoids a slow pass, and messing up debug info.
if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
let mut def_use_analysis = DefUseAnalysis::new(body);
loop {
- def_use_analysis.analyze(read_only!(body));
+ def_use_analysis.analyze(body);
if eliminate_self_assignments(body, &def_use_analysis) {
- def_use_analysis.analyze(read_only!(body));
+ def_use_analysis.analyze(body);
}
let mut changed = false;
fn perform(
self,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
def_use_analysis: &DefUseAnalysis,
dest_local: Local,
location: Location,
pub struct Deaggregator;
impl<'tcx> MirPass<'tcx> for Deaggregator {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, body: &mut Body<'tcx>) {
let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let local_decls = &*local_decls;
for bb in basic_blocks {
use crate::transform::{MirPass, MirSource};
use crate::util as mir_util;
-use rustc_middle::mir::{Body, BodyAndCache};
+use rustc_middle::mir::Body;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{OutputFilenames, OutputType};
Cow::Borrowed(self.0)
}
- fn run_pass(
- &self,
- _tcx: TyCtxt<'tcx>,
- _source: MirSource<'tcx>,
- _body: &mut BodyAndCache<'tcx>,
- ) {
- }
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, _source: MirSource<'tcx>, _body: &mut Body<'tcx>) {}
}
pub struct Disambiguator {
pub struct ElaborateDrops;
impl<'tcx> MirPass<'tcx> for ElaborateDrops {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
debug!("elaborate_drops({:?} @ {:?})", src, body.span);
let def_id = src.def_id();
}
}
-fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut BodyAndCache<'tcx>) {
+fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let gen_ty = body.local_decls.raw[1].ty;
let ref_gen_ty =
DerefArgVisitor { tcx }.visit_body(body);
}
-fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut BodyAndCache<'tcx>) {
+fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let ref_gen_ty = body.local_decls.raw[1].ty;
let pin_did = tcx.lang_items().pin_type().unwrap();
fn replace_local<'tcx>(
local: Local,
ty: Ty<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
tcx: TyCtxt<'tcx>,
) -> Local {
let source_info = source_info(body);
fn locals_live_across_suspend_points(
tcx: TyCtxt<'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
source: MirSource<'tcx>,
always_live_locals: &storage::AlwaysLiveLocals,
movable: bool,
interior: Ty<'tcx>,
always_live_locals: &storage::AlwaysLiveLocals,
movable: bool,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
) -> (
FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
GeneratorLayout<'tcx>,
live_locals_at_suspension_points,
storage_conflicts,
storage_liveness,
- } = locals_live_across_suspend_points(
- tcx,
- read_only!(body),
- source,
- always_live_locals,
- movable,
- );
+ } = locals_live_across_suspend_points(tcx, body, source, always_live_locals, movable);
// Erase regions from the types passed in from typeck so we can compare them with
// MIR types
///
/// After this function, the former entry point of the function will be bb1.
fn insert_switch<'tcx>(
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
cases: Vec<(usize, BasicBlock)>,
transform: &TransformVisitor<'tcx>,
default: TerminatorKind<'tcx>,
}
}
-fn elaborate_generator_drops<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- body: &mut BodyAndCache<'tcx>,
-) {
+fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, body: &mut Body<'tcx>) {
use crate::shim::DropShimElaborator;
use crate::util::elaborate_drops::{elaborate_drop, Unwind};
use crate::util::patch::MirPatch;
transform: &TransformVisitor<'tcx>,
source: MirSource<'tcx>,
gen_ty: Ty<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
drop_clean: BasicBlock,
-) -> BodyAndCache<'tcx> {
+) -> Body<'tcx> {
let mut body = body.clone();
body.arg_count = 1; // make sure the resume argument is not included here
body
}
-fn insert_term_block<'tcx>(
- body: &mut BodyAndCache<'tcx>,
- kind: TerminatorKind<'tcx>,
-) -> BasicBlock {
+fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
let term_block = BasicBlock::new(body.basic_blocks().len());
let source_info = source_info(body);
body.basic_blocks_mut().push(BasicBlockData {
fn insert_panic_block<'tcx>(
tcx: TyCtxt<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
message: AssertMessage<'tcx>,
) -> BasicBlock {
let assert_block = BasicBlock::new(body.basic_blocks().len());
tcx: TyCtxt<'tcx>,
transform: TransformVisitor<'tcx>,
source: MirSource<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
can_return: bool,
) {
let can_unwind = can_unwind(tcx, body);
SourceInfo { span: body.span, scope: OUTERMOST_SOURCE_SCOPE }
}
-fn insert_clean_drop(body: &mut BodyAndCache<'_>) -> BasicBlock {
+fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
let return_block = insert_term_block(body, TerminatorKind::Return);
// Create a block to destroy an unresumed generators. This can only destroy upvars.
}
fn create_cases<'tcx>(
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
transform: &TransformVisitor<'tcx>,
operation: Operation,
) -> Vec<(usize, BasicBlock)> {
}
impl<'tcx> MirPass<'tcx> for StateTransform {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
let yield_ty = if let Some(yield_ty) = body.yield_ty {
yield_ty
} else {
}
impl<'tcx> MirPass<'tcx> for Inline {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
Inliner { tcx, source }.run_pass(body);
}
}
impl Inliner<'tcx> {
- fn run_pass(&self, caller_body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, caller_body: &mut Body<'tcx>) {
// Keep a queue of callsites to try inlining on. We take
// advantage of the fact that queries detect cycles here to
// allow us to try and fetch the fully optimized MIR of a
fn inline_call(
&self,
callsite: CallSite<'tcx>,
- caller_body: &mut BodyAndCache<'tcx>,
- mut callee_body: BodyAndCache<'tcx>,
+ caller_body: &mut Body<'tcx>,
+ mut callee_body: Body<'tcx>,
) -> bool {
let terminator = caller_body[callsite.bb].terminator.take().unwrap();
match terminator.kind {
destination.0,
);
- let ty = dest.ty(&**caller_body, self.tcx);
+ let ty = dest.ty(caller_body, self.tcx);
let temp = LocalDecl::new_temp(ty, callsite.location.span);
&self,
args: Vec<Operand<'tcx>>,
callsite: &CallSite<'tcx>,
- caller_body: &mut BodyAndCache<'tcx>,
+ caller_body: &mut Body<'tcx>,
) -> Vec<Local> {
let tcx = self.tcx;
assert!(args.next().is_none());
let tuple = Place::from(tuple);
- let tuple_tys = if let ty::Tuple(s) = tuple.ty(&**caller_body, tcx).ty.kind {
+ let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind {
s
} else {
bug!("Closure arguments are not passed as a tuple");
&self,
arg: Operand<'tcx>,
callsite: &CallSite<'tcx>,
- caller_body: &mut BodyAndCache<'tcx>,
+ caller_body: &mut Body<'tcx>,
) -> Local {
// FIXME: Analysis of the usage of the arguments to avoid
// unnecessary temporaries.
// Otherwise, create a temporary for the arg
let arg = Rvalue::Use(arg);
- let ty = arg.ty(&**caller_body, self.tcx);
+ let ty = arg.ty(caller_body, self.tcx);
let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
let arg_tmp = caller_body.local_decls.push(arg_tmp);
use rustc_index::vec::Idx;
use rustc_middle::mir::visit::{MutVisitor, Visitor};
use rustc_middle::mir::{
- read_only, Body, BodyAndCache, Constant, Local, Location, Operand, Place, PlaceRef,
- ProjectionElem, Rvalue,
+ Body, Constant, Local, Location, Operand, Place, PlaceRef, ProjectionElem, Rvalue,
};
use rustc_middle::ty::{self, TyCtxt};
use std::mem;
pub struct InstCombine;
impl<'tcx> MirPass<'tcx> for InstCombine {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
// We only run when optimizing MIR (at any level).
if tcx.sess.opts.debugging_opts.mir_opt_level == 0 {
return;
// read-only so that we can do global analyses on the MIR in the process (e.g.
// `Place::ty()`).
let optimizations = {
- let read_only_cache = read_only!(body);
let mut optimization_finder = OptimizationFinder::new(body, tcx);
- optimization_finder.visit_body(&read_only_cache);
+ optimization_finder.visit_body(body);
optimization_finder.optimizations
};
use rustc_hir::def_id::{CrateNum, DefId, DefIdSet, LocalDefId, LOCAL_CRATE};
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_index::vec::IndexVec;
-use rustc_middle::mir::{BodyAndCache, ConstQualifs, MirPhase, Promoted};
+use rustc_middle::mir::{Body, ConstQualifs, MirPhase, Promoted};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::steal::Steal;
use rustc_middle::ty::{InstanceDef, TyCtxt, TypeFoldable};
default_name::<Self>()
}
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>);
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>);
}
pub fn run_passes(
tcx: TyCtxt<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
instance: InstanceDef<'tcx>,
promoted: Option<Promoted>,
mir_phase: MirPhase,
return Default::default();
}
- let item = check_consts::Item {
- body: body.unwrap_read_only(),
- tcx,
- def_id,
- const_kind,
- param_env: tcx.param_env(def_id),
- };
+ let item =
+ check_consts::Item { body, tcx, def_id, const_kind, param_env: tcx.param_env(def_id) };
let mut validator = check_consts::validation::Validator::new(&item);
validator.check_body();
validator.qualifs_in_return_place()
}
-fn mir_const(tcx: TyCtxt<'_>, def_id: DefId) -> &Steal<BodyAndCache<'_>> {
+fn mir_const(tcx: TyCtxt<'_>, def_id: DefId) -> &Steal<Body<'_>> {
// Unsafety check uses the raw mir, so make sure it is run
let _ = tcx.unsafety_check_result(def_id);
&rustc_peek::SanityCheck,
],
);
- body.ensure_predecessors();
tcx.alloc_steal_mir(body)
}
fn mir_validated(
tcx: TyCtxt<'tcx>,
def_id: DefId,
-) -> (&'tcx Steal<BodyAndCache<'tcx>>, &'tcx Steal<IndexVec<Promoted, BodyAndCache<'tcx>>>) {
+) -> (&'tcx Steal<Body<'tcx>>, &'tcx Steal<IndexVec<Promoted, Body<'tcx>>>) {
// Ensure that we compute the `mir_const_qualif` for constants at
// this point, before we steal the mir-const result.
let _ = tcx.mir_const_qualif(def_id);
fn run_optimization_passes<'tcx>(
tcx: TyCtxt<'tcx>,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
def_id: DefId,
promoted: Option<Promoted>,
) {
);
}
-fn optimized_mir(tcx: TyCtxt<'_>, def_id: DefId) -> &BodyAndCache<'_> {
+fn optimized_mir(tcx: TyCtxt<'_>, def_id: DefId) -> &Body<'_> {
if tcx.is_constructor(def_id) {
// There's no reason to run all of the MIR passes on constructors when
// we can just output the MIR we want directly. This also saves const
let (body, _) = tcx.mir_validated(def_id);
let mut body = body.steal();
run_optimization_passes(tcx, &mut body, def_id, None);
- body.ensure_predecessors();
debug_assert!(!body.has_free_regions(), "Free regions in optimized MIR");
tcx.arena.alloc(body)
}
-fn promoted_mir(tcx: TyCtxt<'_>, def_id: DefId) -> &IndexVec<Promoted, BodyAndCache<'_>> {
+fn promoted_mir(tcx: TyCtxt<'_>, def_id: DefId) -> &IndexVec<Promoted, Body<'_>> {
if tcx.is_constructor(def_id) {
return tcx.intern_promoted(IndexVec::new());
}
for (p, mut body) in promoted.iter_enumerated_mut() {
run_optimization_passes(tcx, &mut body, def_id, Some(p));
- body.ensure_predecessors();
}
debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR");
}
impl<'tcx> MirPass<'tcx> for NoLandingPads<'tcx> {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
no_landing_pads(tcx, body)
}
}
-pub fn no_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut BodyAndCache<'tcx>) {
+pub fn no_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.no_landing_pads() {
NoLandingPads::new(tcx).visit_body(body);
}
/// newly created `Constant`.
#[derive(Default)]
pub struct PromoteTemps<'tcx> {
- pub promoted_fragments: Cell<IndexVec<Promoted, BodyAndCache<'tcx>>>,
+ pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
}
impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
// There's not really any point in promoting errorful MIR.
//
// This does not include MIR that failed const-checking, which we still try to promote.
let mut rpo = traversal::reverse_postorder(body);
let (temps, all_candidates) = collect_temps_and_candidates(tcx, body, &mut rpo);
- let promotable_candidates =
- validate_candidates(tcx, read_only!(body), def_id, &temps, &all_candidates);
+ let promotable_candidates = validate_candidates(tcx, body, def_id, &temps, &all_candidates);
let promoted = promote_candidates(def_id, body, tcx, temps, promotable_candidates);
self.promoted_fragments.set(promoted);
// FIXME(eddyb) this is probably excessive, with
// the exception of `union` member accesses.
let ty =
- Place::ty_from(place.local, proj_base, *self.body, self.tcx)
+ Place::ty_from(place.local, proj_base, self.body, self.tcx)
.projection_ty(self.tcx, elem)
.ty;
if ty.is_freeze(self.tcx, self.param_env, DUMMY_SP) {
}
if let BorrowKind::Mut { .. } = kind {
- let ty = place.ty(*self.body, self.tcx).ty;
+ let ty = place.ty(self.body, self.tcx).ty;
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
ProjectionElem::Field(..) => {
if self.const_kind.is_none() {
let base_ty =
- Place::ty_from(place.local, proj_base, *self.body, self.tcx).ty;
+ Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
if let Some(def) = base_ty.ty_adt_def() {
// No promotion of union field accesses.
if def.is_union() {
fn validate_rvalue(&self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
match *rvalue {
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if self.const_kind.is_none() => {
- let operand_ty = operand.ty(*self.body, self.tcx);
+ let operand_ty = operand.ty(self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
}
Rvalue::BinaryOp(op, ref lhs, _) if self.const_kind.is_none() => {
- if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(*self.body, self.tcx).kind {
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).kind {
assert!(
op == BinOp::Eq
|| op == BinOp::Ne
// Raw reborrows can come from reference to pointer coercions,
// so are allowed.
if let [proj_base @ .., ProjectionElem::Deref] = place.projection.as_ref() {
- let base_ty = Place::ty_from(place.local, proj_base, *self.body, self.tcx).ty;
+ let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
if let ty::Ref(..) = base_ty.kind {
return self.validate_place(PlaceRef {
local: place.local,
Rvalue::Ref(_, kind, place) => {
if let BorrowKind::Mut { .. } = kind {
- let ty = place.ty(*self.body, self.tcx).ty;
+ let ty = place.ty(self.body, self.tcx).ty;
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
// Special-case reborrows to be more like a copy of the reference.
let mut place = place.as_ref();
if let [proj_base @ .., ProjectionElem::Deref] = &place.projection {
- let base_ty = Place::ty_from(place.local, proj_base, *self.body, self.tcx).ty;
+ let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
if let ty::Ref(..) = base_ty.kind {
place = PlaceRef { local: place.local, projection: proj_base };
}
while let [proj_base @ .., elem] = place_projection {
// FIXME(eddyb) this is probably excessive, with
// the exception of `union` member accesses.
- let ty = Place::ty_from(place.local, proj_base, *self.body, self.tcx)
+ let ty = Place::ty_from(place.local, proj_base, self.body, self.tcx)
.projection_ty(self.tcx, elem)
.ty;
if ty.is_freeze(self.tcx, self.param_env, DUMMY_SP) {
callee: &Operand<'tcx>,
args: &[Operand<'tcx>],
) -> Result<(), Unpromotable> {
- let fn_ty = callee.ty(*self.body, self.tcx);
+ let fn_ty = callee.ty(self.body, self.tcx);
if !self.explicit && self.const_kind.is_none() {
if let ty::FnDef(def_id, _) = fn_ty.kind {
// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
pub fn validate_candidates(
tcx: TyCtxt<'tcx>,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
def_id: DefId,
temps: &IndexVec<Local, TempState>,
candidates: &[Candidate],
struct Promoter<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
- source: &'a mut BodyAndCache<'tcx>,
- promoted: BodyAndCache<'tcx>,
+ source: &'a mut Body<'tcx>,
+ promoted: Body<'tcx>,
temps: &'a mut IndexVec<Local, TempState>,
extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
def_id: DefId,
candidate: Candidate,
next_promoted_id: usize,
- ) -> Option<BodyAndCache<'tcx>> {
+ ) -> Option<Body<'tcx>> {
let mut rvalue = {
let promoted = &mut self.promoted;
let promoted_id = Promoted::new(next_promoted_id);
pub fn promote_candidates<'tcx>(
def_id: DefId,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
tcx: TyCtxt<'tcx>,
mut temps: IndexVec<Local, TempState>,
candidates: Vec<Candidate>,
-) -> IndexVec<Promoted, BodyAndCache<'tcx>> {
+) -> IndexVec<Promoted, Body<'tcx>> {
// Visit candidates in reverse, in case they're nested.
debug!("promote_candidates({:?})", candidates);
promoted.ignore_interior_mut_in_const_validation = true;
let promoter = Promoter {
- promoted: BodyAndCache::new(promoted),
+ promoted,
tcx,
source: body,
temps: &mut temps,
crate fn should_suggest_const_in_array_repeat_expressions_attribute<'tcx>(
tcx: TyCtxt<'tcx>,
mir_def_id: DefId,
- body: ReadOnlyBodyAndCache<'_, 'tcx>,
+ body: &Body<'tcx>,
operand: &Operand<'tcx>,
) -> bool {
let mut rpo = traversal::reverse_postorder(&body);
/// code for these.
pub struct RemoveNoopLandingPads;
-pub fn remove_noop_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut BodyAndCache<'tcx>) {
+pub fn remove_noop_landing_pads<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.no_landing_pads() {
return;
}
}
impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
remove_noop_landing_pads(tcx, body);
}
}
}
}
- fn remove_nop_landing_pads(&self, body: &mut BodyAndCache<'_>) {
+ fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
// make sure there's a single resume block
let resume_block = {
let patch = MirPatch::new(body);
use crate::transform::{MirPass, MirSource};
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
-use rustc_middle::mir::{self, Body, BodyAndCache, Local, Location};
+use rustc_middle::mir::{self, Body, Local, Location};
use rustc_middle::ty::{self, Ty, TyCtxt};
use crate::dataflow::move_paths::{HasMoveData, MoveData};
pub struct SanityCheck;
impl<'tcx> MirPass<'tcx> for SanityCheck {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
use crate::dataflow::has_rustc_mir_with;
let def_id = src.def_id();
if !tcx.has_attr(def_id, sym::rustc_mir) {
}
}
-pub fn simplify_cfg(body: &mut BodyAndCache<'_>) {
+pub fn simplify_cfg(body: &mut Body<'_>) {
CfgSimplifier::new(body).simplify();
remove_dead_blocks(body);
Cow::Borrowed(&self.label)
}
- fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, _src: MirSource<'tcx>, body: &mut Body<'tcx>) {
debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body);
simplify_cfg(body);
}
}
impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
- pub fn new(body: &'a mut BodyAndCache<'tcx>) -> Self {
+ pub fn new(body: &'a mut Body<'tcx>) -> Self {
let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
// we can't use mir.predecessors() here because that counts
}
}
-pub fn remove_dead_blocks(body: &mut BodyAndCache<'_>) {
+pub fn remove_dead_blocks(body: &mut Body<'_>) {
let mut seen = BitSet::new_empty(body.basic_blocks().len());
for (bb, _) in traversal::preorder(body) {
seen.insert(bb.index());
pub struct SimplifyLocals;
impl<'tcx> MirPass<'tcx> for SimplifyLocals {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
trace!("running SimplifyLocals on {:?}", source);
// First, we're going to get a count of *actual* uses for every `Local`.
// Take a look at `DeclMarker::visit_local()` to see exactly what is ignored.
let mut used_locals = {
- let read_only_cache = read_only!(body);
let mut marker = DeclMarker::new(body);
- marker.visit_body(&read_only_cache);
+ marker.visit_body(&body);
marker.local_counts
};
Cow::Borrowed(&self.label)
}
- fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, src: MirSource<'tcx>, body: &mut Body<'tcx>) {
let param_env = tcx.param_env(src.def_id());
for block in body.basic_blocks_mut() {
let terminator = block.terminator_mut();
pub struct SimplifyArmIdentity;
impl<'tcx> MirPass<'tcx> for SimplifyArmIdentity {
- fn run_pass(&self, _: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, _: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
for bb in basic_blocks {
// Need 3 statements:
pub struct SimplifyBranchSame;
impl<'tcx> MirPass<'tcx> for SimplifyBranchSame {
- fn run_pass(&self, _: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, _: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
let mut did_remove_blocks = false;
let bbs = body.basic_blocks_mut();
for bb_idx in bbs.indices() {
use crate::transform::{MirPass, MirSource};
use rustc_middle::mir::{
- BasicBlock, BasicBlockData, Body, BodyAndCache, Local, Operand, Rvalue, StatementKind,
- TerminatorKind,
+ BasicBlock, BasicBlockData, Body, Local, Operand, Rvalue, StatementKind, TerminatorKind,
};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{Ty, TyCtxt};
}
impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
- fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
if source.promoted.is_some() {
return;
}
pub struct UnreachablePropagation;
impl MirPass<'_> for UnreachablePropagation {
- fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
+ fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.opts.debugging_opts.mir_opt_level < 3 {
// Enable only under -Zmir-opt-level=3 as in some cases (check the deeply-nested-opt
// perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
-use rustc_middle::mir::{Body, BodyAndCache, Local, Location, ReadOnlyBodyAndCache, VarDebugInfo};
+use rustc_middle::mir::{Body, Local, Location, VarDebugInfo};
use rustc_middle::ty::TyCtxt;
use std::mem;
DefUseAnalysis { info: IndexVec::from_elem_n(Info::new(), body.local_decls.len()) }
}
- pub fn analyze(&mut self, body: ReadOnlyBodyAndCache<'_, '_>) {
+ pub fn analyze(&mut self, body: &Body<'_>) {
self.clear();
let mut finder = DefUseFinder {
fn mutate_defs_and_uses(
&self,
local: Local,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
new_local: Local,
tcx: TyCtxt<'tcx>,
) {
pub fn replace_all_defs_and_uses_with(
&self,
local: Local,
- body: &mut BodyAndCache<'tcx>,
+ body: &mut Body<'tcx>,
new_local: Local,
tcx: TyCtxt<'tcx>,
) {
/// Computes which local variables are live within the given function
/// `mir`, including drops.
-pub fn liveness_of_locals(body: ReadOnlyBodyAndCache<'_, '_>) -> LivenessResult {
+pub fn liveness_of_locals(body: &Body<'_>) -> LivenessResult {
let num_live_vars = body.local_decls.len();
let def_use: IndexVec<_, DefsUses> =
self.make_nop.push(loc);
}
- pub fn apply(self, body: &mut BodyAndCache<'tcx>) {
+ pub fn apply(self, body: &mut Body<'tcx>) {
debug!("MirPatch: make nops at: {:?}", self.make_nop);
for loc in self.make_nop {
body.make_statement_nop(loc);
use super::lints;
-crate fn mir_built(tcx: TyCtxt<'_>, def_id: DefId) -> &ty::steal::Steal<BodyAndCache<'_>> {
+crate fn mir_built(tcx: TyCtxt<'_>, def_id: DefId) -> &ty::steal::Steal<Body<'_>> {
tcx.alloc_steal_mir(mir_build(tcx, def_id))
}
/// Construct the MIR for a given `DefId`.
-fn mir_build(tcx: TyCtxt<'_>, def_id: DefId) -> BodyAndCache<'_> {
+fn mir_build(tcx: TyCtxt<'_>, def_id: DefId) -> Body<'_> {
let id = tcx.hir().as_local_hir_id(def_id).unwrap();
// Figure out what primary body this item has.
lints::check(tcx, &body, def_id);
- let mut body = BodyAndCache::new(body);
- body.ensure_predecessors();
-
// The borrow checker will replace all the regions here with its own
// inference variables. There's no point having non-erased regions here.
// The exception is `body.user_type_annotations`, which is used unmodified