mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::StatementKind::Assign(ref place, ref rvalue) => {
+ place.hash_stable(hcx, hasher);
rvalue.hash_stable(hcx, hasher);
}
- mir::StatementKind::SetDiscriminant { ref lvalue, variant_index } => {
- lvalue.hash_stable(hcx, hasher);
+ mir::StatementKind::SetDiscriminant { ref place, variant_index } => {
+ place.hash_stable(hcx, hasher);
variant_index.hash_stable(hcx, hasher);
}
- mir::StatementKind::StorageLive(ref lvalue) |
- mir::StatementKind::StorageDead(ref lvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::StatementKind::StorageLive(ref place) |
+ mir::StatementKind::StorageDead(ref place) => {
+ place.hash_stable(hcx, hasher);
}
mir::StatementKind::EndRegion(ref region_scope) => {
region_scope.hash_stable(hcx, hasher);
}
- mir::StatementKind::Validate(ref op, ref lvalues) => {
+ mir::StatementKind::Validate(ref op, ref places) => {
op.hash_stable(hcx, hasher);
- lvalues.hash_stable(hcx, hasher);
+ places.hash_stable(hcx, hasher);
}
mir::StatementKind::Nop => {}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
hcx: &mut StableHashingContext<'gcx>,
hasher: &mut StableHasher<W>)
{
- self.lval.hash_stable(hcx, hasher);
+ self.place.hash_stable(hcx, hasher);
self.ty.hash_stable(hcx, hasher);
self.re.hash_stable(hcx, hasher);
self.mutbl.hash_stable(hcx, hasher);
mir::Place::Static(ref statik) => {
statik.hash_stable(hcx, hasher);
}
- mir::Place::Projection(ref lvalue_projection) => {
- lvalue_projection.hash_stable(hcx, hasher);
+ mir::Place::Projection(ref place_projection) => {
+ place_projection.hash_stable(hcx, hasher);
}
}
}
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- mir::Operand::Copy(ref lvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::Operand::Copy(ref place) => {
+ place.hash_stable(hcx, hasher);
}
- mir::Operand::Move(ref lvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::Operand::Move(ref place) => {
+ place.hash_stable(hcx, hasher);
}
mir::Operand::Constant(ref constant) => {
constant.hash_stable(hcx, hasher);
operand.hash_stable(hcx, hasher);
val.hash_stable(hcx, hasher);
}
- mir::Rvalue::Ref(region, borrow_kind, ref lvalue) => {
+ mir::Rvalue::Ref(region, borrow_kind, ref place) => {
region.hash_stable(hcx, hasher);
borrow_kind.hash_stable(hcx, hasher);
- lvalue.hash_stable(hcx, hasher);
+ place.hash_stable(hcx, hasher);
}
- mir::Rvalue::Len(ref lvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::Rvalue::Len(ref place) => {
+ place.hash_stable(hcx, hasher);
}
mir::Rvalue::Cast(cast_kind, ref operand, ty) => {
cast_kind.hash_stable(hcx, hasher);
op.hash_stable(hcx, hasher);
operand.hash_stable(hcx, hasher);
}
- mir::Rvalue::Discriminant(ref lvalue) => {
- lvalue.hash_stable(hcx, hasher);
+ mir::Rvalue::Discriminant(ref place) => {
+ place.hash_stable(hcx, hasher);
}
mir::Rvalue::NullaryOp(op, ty) => {
op.hash_stable(hcx, hasher);
upvar_decls: Vec<UpvarDecl>,
span: Span) -> Self
{
- // We need `arg_count` locals, and one for the return pointer
+ // We need `arg_count` locals, and one for the return place
assert!(local_decls.len() >= arg_count + 1,
"expected at least {} locals, got {}", arg_count + 1, local_decls.len());
let index = local.0 as usize;
if index == 0 {
debug_assert!(self.local_decls[local].mutability == Mutability::Mut,
- "return pointer should be mutable");
+ "return place should be mutable");
LocalKind::ReturnPointer
} else if index < self.arg_count + 1 {
}
/// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
- /// locals that are neither arguments nor the return pointer).
+ /// locals that are neither arguments nor the return place).
#[inline]
pub fn vars_and_temps_iter(&self) -> impl Iterator<Item=Local> {
let arg_count = self.arg_count;
/// Return the return type, it always return first element from `local_decls` array
pub fn return_ty(&self) -> Ty<'tcx> {
- self.local_decls[RETURN_POINTER].ty
+ self.local_decls[RETURN_PLACE].ty
}
}
newtype_index!(Local
{
DEBUG_FORMAT = "_{}",
- const RETURN_POINTER = 0,
+ const RETURN_PLACE = 0,
});
/// Classifies locals into categories. See `Mir::local_kind`.
/// A MIR local.
///
/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
-/// argument, or the return pointer.
+/// argument, or the return place.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct LocalDecl<'tcx> {
/// `let mut x` vs `let x`.
///
- /// Temporaries and the return pointer are always mutable.
+ /// Temporaries and the return place are always mutable.
pub mutability: Mutability,
/// True if this corresponds to a user-declared local variable.
}
}
- /// Builds a `LocalDecl` for the return pointer.
+ /// Builds a `LocalDecl` for the return place.
///
/// This must be inserted into the `local_decls` list as the first local.
#[inline]
- pub fn new_return_pointer(return_ty: Ty, span: Span) -> LocalDecl {
+ pub fn new_return_place(return_ty: Ty, span: Span) -> LocalDecl {
LocalDecl {
mutability: Mutability::Mut,
ty: return_ty,
/// continue. Emitted by build::scope::diverge_cleanup.
Resume,
- /// Indicates a normal return. The return pointer lvalue should
- /// have been filled in by now. This should occur at most once.
+ /// Indicates a normal return. The return place should have
+ /// been filled in by now. This should occur at most once.
Return,
/// Indicates a terminator that can never be reached.
/// Drop the Place and assign the new value over it. This ensures
/// that the assignment to LV occurs *even if* the destructor for
- /// lvalue unwinds. Its semantics are best explained by by the
+ /// place unwinds. Its semantics are best explained by the
/// elaboration:
///
/// ```
use self::TerminatorKind::*;
match *self {
Goto { .. } => write!(fmt, "goto"),
- SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
+ SwitchInt { discr: ref place, .. } => write!(fmt, "switchInt({:?})", place),
Return => write!(fmt, "return"),
GeneratorDrop => write!(fmt, "generator_drop"),
Resume => write!(fmt, "resume"),
Assign(Place<'tcx>, Rvalue<'tcx>),
/// Write the discriminant for a variant to the enum Place.
- SetDiscriminant { lvalue: Place<'tcx>, variant_index: usize },
+ SetDiscriminant { place: Place<'tcx>, variant_index: usize },
/// Start a live range for the storage of the local.
StorageLive(Local),
inputs: Vec<Operand<'tcx>>
},
- /// Assert the given lvalues to be valid inhabitants of their type. These statements are
+ /// Assert the given places to be valid inhabitants of their type. These statements are
/// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed.
/// See <https://internals.rust-lang.org/t/types-as-contracts/5562/73> for more details.
Validate(ValidationOp, Vec<ValidationOperand<'tcx, Place<'tcx>>>),
/// `Validate` statement.
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)]
pub enum ValidationOp {
- /// Recursively traverse the lvalue following the type and validate that all type
+ /// Recursively traverse the place following the type and validate that all type
/// invariants are maintained. Furthermore, acquire exclusive/read-only access to the
- /// memory reachable from the lvalue.
+ /// memory reachable from the place.
Acquire,
/// Recursive traverse the *mutable* part of the type and relinquish all exclusive
/// access.
// This is generic so that it can be reused by miri
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub struct ValidationOperand<'tcx, T> {
- pub lval: T,
+ pub place: T,
pub ty: Ty<'tcx>,
pub re: Option<region::Scope>,
pub mutbl: hir::Mutability,
impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
- write!(fmt, "{:?}: {:?}", self.lval, self.ty)?;
+ write!(fmt, "{:?}: {:?}", self.place, self.ty)?;
if let Some(ce) = self.re {
// (reuse lifetime rendering policy from ppaux.)
write!(fmt, "/{}", ty::ReScope(ce))?;
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
match self.kind {
- Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
+ Assign(ref place, ref rv) => write!(fmt, "{:?} = {:?}", place, rv),
// (reuse lifetime rendering policy from ppaux.)
EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)),
- Validate(ref op, ref lvalues) => write!(fmt, "Validate({:?}, {:?})", op, lvalues),
- StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv),
- StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv),
- SetDiscriminant{lvalue: ref lv, variant_index: index} => {
- write!(fmt, "discriminant({:?}) = {:?}", lv, index)
+ Validate(ref op, ref places) => write!(fmt, "Validate({:?}, {:?})", op, places),
+ StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
+ StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
+ SetDiscriminant { ref place, variant_index } => {
+ write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
},
InlineAsm { ref asm, ref outputs, ref inputs } => {
write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs)
/// static or static mut variable
Static(Box<Static<'tcx>>),
- /// projection out of an lvalue (access a field, deref a pointer, etc)
+ /// projection out of a place (access a field, deref a pointer, etc)
Projection(Box<PlaceProjection<'tcx>>),
}
Downcast(&'tcx AdtDef, usize),
}
-/// Alias for projections as they appear in lvalues, where the base is an lvalue
+/// Alias for projections as they appear in places, where the base is a place
/// and the index is a local.
pub type PlaceProjection<'tcx> = Projection<'tcx, Place<'tcx>, Local, Ty<'tcx>>;
-/// Alias for projections as they appear in lvalues, where the base is an lvalue
+/// Alias for projections as they appear in places, where the base is a place
/// and the index is a local.
pub type PlaceElem<'tcx> = ProjectionElem<'tcx, Local, Ty<'tcx>>;
// Operands
/// These are values that can appear inside an rvalue (or an index
-/// lvalue). They are intentionally limited to prevent rvalues from
+/// place). They are intentionally limited to prevent rvalues from
/// being nested in one another.
#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum Operand<'tcx> {
/// Copy: The value must be available for use afterwards.
///
- /// This implies that the type of the lvalue must be `Copy`; this is true
+ /// This implies that the type of the place must be `Copy`; this is true
/// by construction during build, but also checked by the MIR type checker.
Copy(Place<'tcx>),
/// Move: The value (including old borrows of it) will not be used again.
use self::Operand::*;
match *self {
Constant(ref a) => write!(fmt, "{:?}", a),
- Copy(ref lv) => write!(fmt, "{:?}", lv),
- Move(ref lv) => write!(fmt, "move {:?}", lv),
+ Copy(ref place) => write!(fmt, "{:?}", place),
+ Move(ref place) => write!(fmt, "move {:?}", place),
}
}
}
use self::Rvalue::*;
match *self {
- Use(ref lvalue) => write!(fmt, "{:?}", lvalue),
+ Use(ref place) => write!(fmt, "{:?}", place),
Repeat(ref a, ref b) => write!(fmt, "[{:?}; {:?}]", a, b),
Len(ref a) => write!(fmt, "Len({:?})", a),
- Cast(ref kind, ref lv, ref ty) => write!(fmt, "{:?} as {:?} ({:?})", lv, ty, kind),
+ Cast(ref kind, ref place, ref ty) => {
+ write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
+ }
BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
CheckedBinaryOp(ref op, ref a, ref b) => {
write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
}
UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
- Discriminant(ref lval) => write!(fmt, "discriminant({:?})", lval),
+ Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t),
- Ref(region, borrow_kind, ref lv) => {
+ Ref(region, borrow_kind, ref place) => {
let kind_str = match borrow_kind {
BorrowKind::Shared => "",
BorrowKind::Mut | BorrowKind::Unique => "mut ",
// Do not even print 'static
"".to_owned()
};
- write!(fmt, "&{}{}{:?}", region, kind_str, lv)
+ write!(fmt, "&{}{}{:?}", region, kind_str, place)
}
- Aggregate(ref kind, ref lvs) => {
- fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
+ Aggregate(ref kind, ref places) => {
+ fn fmt_tuple(fmt: &mut Formatter, places: &[Operand]) -> fmt::Result {
let mut tuple_fmt = fmt.debug_tuple("");
- for lv in lvs {
- tuple_fmt.field(lv);
+ for place in places {
+ tuple_fmt.field(place);
}
tuple_fmt.finish()
}
match **kind {
- AggregateKind::Array(_) => write!(fmt, "{:?}", lvs),
+ AggregateKind::Array(_) => write!(fmt, "{:?}", places),
AggregateKind::Tuple => {
- match lvs.len() {
+ match places.len() {
0 => write!(fmt, "()"),
- 1 => write!(fmt, "({:?},)", lvs[0]),
- _ => fmt_tuple(fmt, lvs),
+ 1 => write!(fmt, "({:?},)", places[0]),
+ _ => fmt_tuple(fmt, places),
}
}
match variant_def.ctor_kind {
CtorKind::Const => Ok(()),
- CtorKind::Fn => fmt_tuple(fmt, lvs),
+ CtorKind::Fn => fmt_tuple(fmt, places),
CtorKind::Fictive => {
let mut struct_fmt = fmt.debug_struct("");
- for (field, lv) in variant_def.fields.iter().zip(lvs) {
- struct_fmt.field(&field.name.as_str(), lv);
+ for (field, place) in variant_def.fields.iter().zip(places) {
+ struct_fmt.field(&field.name.as_str(), place);
}
struct_fmt.finish()
}
let mut struct_fmt = fmt.debug_struct(&name);
tcx.with_freevars(node_id, |freevars| {
- for (freevar, lv) in freevars.iter().zip(lvs) {
+ for (freevar, place) in freevars.iter().zip(places) {
let var_name = tcx.hir.name(freevar.var_id());
- struct_fmt.field(&var_name.as_str(), lv);
+ struct_fmt.field(&var_name.as_str(), place);
}
});
let mut struct_fmt = fmt.debug_struct(&name);
tcx.with_freevars(node_id, |freevars| {
- for (freevar, lv) in freevars.iter().zip(lvs) {
+ for (freevar, place) in freevars.iter().zip(places) {
let var_name = tcx.hir.name(freevar.var_id());
- struct_fmt.field(&var_name.as_str(), lv);
+ struct_fmt.field(&var_name.as_str(), place);
}
- struct_fmt.field("$state", &lvs[freevars.len()]);
- for i in (freevars.len() + 1)..lvs.len() {
+ struct_fmt.field("$state", &places[freevars.len()]);
+ for i in (freevars.len() + 1)..places.len() {
struct_fmt.field(&format!("${}", i - freevars.len() - 1),
- &lvs[i]);
+ &places[i]);
}
});
impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ValidationOperand {
- lval: self.lval.fold_with(folder),
+ place: self.place.fold_with(folder),
ty: self.ty.fold_with(folder),
re: self.re,
mutbl: self.mutbl,
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
- self.lval.visit_with(visitor) || self.ty.visit_with(visitor)
+ self.place.visit_with(visitor) || self.ty.visit_with(visitor)
}
}
use mir::StatementKind::*;
let kind = match self.kind {
- Assign(ref lval, ref rval) => Assign(lval.fold_with(folder), rval.fold_with(folder)),
- SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant {
- lvalue: lvalue.fold_with(folder),
+ Assign(ref place, ref rval) => Assign(place.fold_with(folder), rval.fold_with(folder)),
+ SetDiscriminant { ref place, variant_index } => SetDiscriminant {
+ place: place.fold_with(folder),
variant_index,
},
StorageLive(ref local) => StorageLive(local.fold_with(folder)),
// trait with a `fn fold_scope`.
EndRegion(ref region_scope) => EndRegion(region_scope.clone()),
- Validate(ref op, ref lvals) =>
+ Validate(ref op, ref places) =>
Validate(op.clone(),
- lvals.iter().map(|operand| operand.fold_with(folder)).collect()),
+ places.iter().map(|operand| operand.fold_with(folder)).collect()),
Nop => Nop,
};
use mir::StatementKind::*;
match self.kind {
- Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) }
- SetDiscriminant { ref lvalue, .. } => lvalue.visit_with(visitor),
+ Assign(ref place, ref rval) => { place.visit_with(visitor) || rval.visit_with(visitor) }
+ SetDiscriminant { ref place, .. } => place.visit_with(visitor),
StorageLive(ref local) |
StorageDead(ref local) => local.visit_with(visitor),
InlineAsm { ref outputs, ref inputs, .. } =>
// trait with a `fn visit_scope`.
EndRegion(ref _scope) => false,
- Validate(ref _op, ref lvalues) =>
- lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)),
+ Validate(ref _op, ref places) =>
+ places.iter().any(|ty_and_place| ty_and_place.visit_with(visitor)),
Nop => false,
}
match *self {
Use(ref op) => Use(op.fold_with(folder)),
Repeat(ref op, len) => Repeat(op.fold_with(folder), len),
- Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)),
- Len(ref lval) => Len(lval.fold_with(folder)),
+ Ref(region, bk, ref place) =>
+ Ref(region.fold_with(folder), bk, place.fold_with(folder)),
+ Len(ref place) => Len(place.fold_with(folder)),
Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
BinaryOp(op, ref rhs, ref lhs) =>
BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
CheckedBinaryOp(op, ref rhs, ref lhs) =>
CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
- Discriminant(ref lval) => Discriminant(lval.fold_with(folder)),
+ Discriminant(ref place) => Discriminant(place.fold_with(folder)),
NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)),
Aggregate(ref kind, ref fields) => {
let kind = box match **kind {
match *self {
Use(ref op) => op.visit_with(visitor),
Repeat(ref op, _) => op.visit_with(visitor),
- Ref(region, _, ref lval) => region.visit_with(visitor) || lval.visit_with(visitor),
- Len(ref lval) => lval.visit_with(visitor),
+ Ref(region, _, ref place) => region.visit_with(visitor) || place.visit_with(visitor),
+ Len(ref place) => place.visit_with(visitor),
Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
BinaryOp(_, ref rhs, ref lhs) |
CheckedBinaryOp(_, ref rhs, ref lhs) =>
rhs.visit_with(visitor) || lhs.visit_with(visitor),
UnaryOp(_, ref val) => val.visit_with(visitor),
- Discriminant(ref lval) => lval.visit_with(visitor),
+ Discriminant(ref place) => place.visit_with(visitor),
NullaryOp(_, ty) => ty.visit_with(visitor),
Aggregate(ref kind, ref fields) => {
(match **kind {
impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
- Operand::Copy(ref lval) => Operand::Copy(lval.fold_with(folder)),
- Operand::Move(ref lval) => Operand::Move(lval.fold_with(folder)),
+ Operand::Copy(ref place) => Operand::Copy(place.fold_with(folder)),
+ Operand::Move(ref place) => Operand::Move(place.fold_with(folder)),
Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
- Operand::Copy(ref lval) |
- Operand::Move(ref lval) => lval.visit_with(visitor),
+ Operand::Copy(ref place) |
+ Operand::Move(ref place) => place.visit_with(visitor),
Operand::Constant(ref c) => c.visit_with(visitor)
}
}
Rvalue::Repeat(ref operand, count) => {
tcx.mk_array_const_usize(operand.ty(local_decls, tcx), count)
}
- Rvalue::Ref(reg, bk, ref lv) => {
- let lv_ty = lv.ty(local_decls, tcx).to_ty(tcx);
+ Rvalue::Ref(reg, bk, ref place) => {
+ let place_ty = place.ty(local_decls, tcx).to_ty(tcx);
tcx.mk_ref(reg,
ty::TypeAndMut {
- ty: lv_ty,
+ ty: place_ty,
mutbl: bk.to_mutbl_lossy()
}
)
Rvalue::UnaryOp(UnOp::Neg, ref operand) => {
operand.ty(local_decls, tcx)
}
- Rvalue::Discriminant(ref lval) => {
- let ty = lval.ty(local_decls, tcx).to_ty(tcx);
+ Rvalue::Discriminant(ref place) => {
+ let ty = place.ty(local_decls, tcx).to_ty(tcx);
if let ty::TyAdt(adt_def, _) = ty.sty {
adt_def.repr.discr_type().to_ty(tcx)
} else {
fn visit_assign(&mut self,
block: BasicBlock,
- lvalue: & $($mutability)* Place<'tcx>,
+ place: & $($mutability)* Place<'tcx>,
rvalue: & $($mutability)* Rvalue<'tcx>,
location: Location) {
- self.super_assign(block, lvalue, rvalue, location);
+ self.super_assign(block, place, rvalue, location);
}
fn visit_terminator(&mut self,
self.super_operand(operand, location);
}
- fn visit_lvalue(&mut self,
- lvalue: & $($mutability)* Place<'tcx>,
+ fn visit_place(&mut self,
+ place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- self.super_lvalue(lvalue, context, location);
+ self.super_place(place, context, location);
}
fn visit_static(&mut self,
}
fn visit_projection(&mut self,
- lvalue: & $($mutability)* PlaceProjection<'tcx>,
+ place: & $($mutability)* PlaceProjection<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- self.super_projection(lvalue, context, location);
+ self.super_projection(place, context, location);
}
fn visit_projection_elem(&mut self,
- lvalue: & $($mutability)* PlaceElem<'tcx>,
+ place: & $($mutability)* PlaceElem<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- self.super_projection_elem(lvalue, context, location);
+ self.super_projection_elem(place, context, location);
}
fn visit_branch(&mut self,
self.visit_source_info(source_info);
match *kind {
- StatementKind::Assign(ref $($mutability)* lvalue,
+ StatementKind::Assign(ref $($mutability)* place,
ref $($mutability)* rvalue) => {
- self.visit_assign(block, lvalue, rvalue, location);
+ self.visit_assign(block, place, rvalue, location);
}
StatementKind::EndRegion(_) => {}
- StatementKind::Validate(_, ref $($mutability)* lvalues) => {
- for operand in lvalues {
- self.visit_lvalue(& $($mutability)* operand.lval,
+ StatementKind::Validate(_, ref $($mutability)* places) => {
+ for operand in places {
+ self.visit_place(& $($mutability)* operand.place,
PlaceContext::Validate, location);
self.visit_ty(& $($mutability)* operand.ty,
TyContext::Location(location));
}
}
- StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => {
- self.visit_lvalue(lvalue, PlaceContext::Store, location);
+ StatementKind::SetDiscriminant{ ref $($mutability)* place, .. } => {
+ self.visit_place(place, PlaceContext::Store, location);
}
StatementKind::StorageLive(ref $($mutability)* local) => {
self.visit_local(local, PlaceContext::StorageLive, location);
ref $($mutability)* inputs,
asm: _ } => {
for output in & $($mutability)* outputs[..] {
- self.visit_lvalue(output, PlaceContext::Store, location);
+ self.visit_place(output, PlaceContext::Store, location);
}
for input in & $($mutability)* inputs[..] {
self.visit_operand(input, location);
fn super_assign(&mut self,
_block: BasicBlock,
- lvalue: &$($mutability)* Place<'tcx>,
+ place: &$($mutability)* Place<'tcx>,
rvalue: &$($mutability)* Rvalue<'tcx>,
location: Location) {
- self.visit_lvalue(lvalue, PlaceContext::Store, location);
+ self.visit_place(place, PlaceContext::Store, location);
self.visit_rvalue(rvalue, location);
}
TerminatorKind::Drop { ref $($mutability)* location,
target,
unwind } => {
- self.visit_lvalue(location, PlaceContext::Drop, source_location);
+ self.visit_place(location, PlaceContext::Drop, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
ref $($mutability)* value,
target,
unwind } => {
- self.visit_lvalue(location, PlaceContext::Drop, source_location);
+ self.visit_place(location, PlaceContext::Drop, source_location);
self.visit_operand(value, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
self.visit_operand(arg, source_location);
}
if let Some((ref $($mutability)* destination, target)) = *destination {
- self.visit_lvalue(destination, PlaceContext::Call, source_location);
+ self.visit_place(destination, PlaceContext::Call, source_location);
self.visit_branch(block, target);
}
cleanup.map(|t| self.visit_branch(block, t));
Rvalue::Ref(ref $($mutability)* r, bk, ref $($mutability)* path) => {
self.visit_region(r, location);
- self.visit_lvalue(path, PlaceContext::Borrow {
+ self.visit_place(path, PlaceContext::Borrow {
region: *r,
kind: bk
}, location);
}
Rvalue::Len(ref $($mutability)* path) => {
- self.visit_lvalue(path, PlaceContext::Inspect, location);
+ self.visit_place(path, PlaceContext::Inspect, location);
}
Rvalue::Cast(_cast_kind,
self.visit_operand(op, location);
}
- Rvalue::Discriminant(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, PlaceContext::Inspect, location);
+ Rvalue::Discriminant(ref $($mutability)* place) => {
+ self.visit_place(place, PlaceContext::Inspect, location);
}
Rvalue::NullaryOp(_op, ref $($mutability)* ty) => {
operand: & $($mutability)* Operand<'tcx>,
location: Location) {
match *operand {
- Operand::Copy(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, PlaceContext::Copy, location);
+ Operand::Copy(ref $($mutability)* place) => {
+ self.visit_place(place, PlaceContext::Copy, location);
}
- Operand::Move(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, PlaceContext::Move, location);
+ Operand::Move(ref $($mutability)* place) => {
+ self.visit_place(place, PlaceContext::Move, location);
}
Operand::Constant(ref $($mutability)* constant) => {
self.visit_constant(constant, location);
}
}
- fn super_lvalue(&mut self,
- lvalue: & $($mutability)* Place<'tcx>,
+ fn super_place(&mut self,
+ place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- match *lvalue {
+ match *place {
Place::Local(ref $($mutability)* local) => {
self.visit_local(local, context, location);
}
} else {
PlaceContext::Projection(Mutability::Not)
};
- self.visit_lvalue(base, context, location);
+ self.visit_place(base, context, location);
self.visit_projection_elem(elem, context, location);
}
// Being borrowed
Borrow { region: Region<'tcx>, kind: BorrowKind },
- // Used as base for another lvalue, e.g. `x` in `x.y`.
+ // Used as base for another place, e.g. `x` in `x.y`.
//
// The `Mutability` argument specifies whether the projection is being performed in order to
- // (potentially) mutate the lvalue. For example, the projection `x.y` is marked as a mutation
+ // (potentially) mutate the place. For example, the projection `x.y` is marked as a mutation
// in these cases:
//
// x.y = ...;
}
impl<'tcx> PlaceContext<'tcx> {
- /// Returns true if this lvalue context represents a drop.
+ /// Returns true if this place context represents a drop.
pub fn is_drop(&self) -> bool {
match *self {
PlaceContext::Drop => true,
}
}
- /// Returns true if this lvalue context represents a storage live or storage dead marker.
+ /// Returns true if this place context represents a storage live or storage dead marker.
pub fn is_storage_marker(&self) -> bool {
match *self {
PlaceContext::StorageLive | PlaceContext::StorageDead => true,
}
}
- /// Returns true if this lvalue context represents a storage live marker.
+ /// Returns true if this place context represents a storage live marker.
pub fn is_storage_live_marker(&self) -> bool {
match *self {
PlaceContext::StorageLive => true,
}
}
- /// Returns true if this lvalue context represents a storage dead marker.
+ /// Returns true if this place context represents a storage dead marker.
pub fn is_storage_dead_marker(&self) -> bool {
match *self {
PlaceContext::StorageDead => true,
}
}
- /// Returns true if this lvalue context represents a use that potentially changes the value.
+ /// Returns true if this place context represents a use that potentially changes the value.
pub fn is_mutating_use(&self) -> bool {
match *self {
PlaceContext::Store | PlaceContext::Call |
}
}
- /// Returns true if this lvalue context represents a use that does not change the value.
+ /// Returns true if this place context represents a use that does not change the value.
pub fn is_nonmutating_use(&self) -> bool {
match *self {
PlaceContext::Inspect | PlaceContext::Borrow { kind: BorrowKind::Shared, .. } |
// NOTE: NLL RFC calls for *shallow* write; using Deep
// for short-term compat w/ AST-borrowck. Also, switch
// to shallow requires to dataflow: "if this is an
- // assignment `lv = <rvalue>`, then any loan for some
- // path P of which `lv` is a prefix is killed."
- self.mutate_lvalue(ContextKind::AssignLhs.new(location),
+ // assignment `place = <rvalue>`, then any loan for some
+ // path P of which `place` is a prefix is killed."
+ self.mutate_place(ContextKind::AssignLhs.new(location),
(lhs, span), Deep, JustWrite, flow_state);
self.consume_rvalue(ContextKind::AssignRhs.new(location),
(rhs, span), location, flow_state);
}
- StatementKind::SetDiscriminant { ref lvalue, variant_index: _ } => {
- self.mutate_lvalue(ContextKind::SetDiscrim.new(location),
- (lvalue, span),
+ StatementKind::SetDiscriminant { ref place, variant_index: _ } => {
+ self.mutate_place(ContextKind::SetDiscrim.new(location),
+ (place, span),
Shallow(Some(ArtificialField::Discriminant)),
JustWrite,
flow_state);
for (o, output) in asm.outputs.iter().zip(outputs) {
if o.is_indirect {
// FIXME(eddyb) indirect inline asm outputs should
- // be encoeded through MIR lvalue derefs instead.
- self.access_lvalue(context,
+ // be encoded through MIR place derefs instead.
+ self.access_place(context,
(output, span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
(output, span), flow_state);
} else {
- self.mutate_lvalue(context,
+ self.mutate_place(context,
(output, span),
Deep,
if o.is_rw { WriteAndRead } else { JustWrite },
}
StatementKind::StorageDead(local) => {
- self.access_lvalue(ContextKind::StorageDead.new(location),
+ self.access_place(ContextKind::StorageDead.new(location),
(&Place::Local(local), span),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
self.consume_operand(ContextKind::SwitchInt.new(loc),
(discr, span), flow_state);
}
- TerminatorKind::Drop { location: ref drop_lvalue, target: _, unwind: _ } => {
- self.access_lvalue(ContextKind::Drop.new(loc),
- (drop_lvalue, span),
+ TerminatorKind::Drop { location: ref drop_place, target: _, unwind: _ } => {
+ self.access_place(ContextKind::Drop.new(loc),
+ (drop_place, span),
(Deep, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state);
}
- TerminatorKind::DropAndReplace { location: ref drop_lvalue,
+ TerminatorKind::DropAndReplace { location: ref drop_place,
value: ref new_value,
target: _,
unwind: _ } => {
- self.mutate_lvalue(ContextKind::DropAndReplace.new(loc),
- (drop_lvalue, span),
+ self.mutate_place(ContextKind::DropAndReplace.new(loc),
+ (drop_place, span),
Deep,
JustWrite,
flow_state);
(arg, span), flow_state);
}
if let Some((ref dest, _/*bb*/)) = *destination {
- self.mutate_lvalue(ContextKind::CallDest.new(loc),
+ self.mutate_place(ContextKind::CallDest.new(loc),
(dest, span),
Deep,
JustWrite,
// so this "extra check" serves as a kind of backup.
let domain = flow_state.borrows.base_results.operator();
for borrow in domain.borrows() {
- let root_lvalue = self.prefixes(
- &borrow.lvalue,
+ let root_place = self.prefixes(
+ &borrow.place,
PrefixSet::All
).last().unwrap();
- match root_lvalue {
+ match root_place {
Place::Static(_) => {
- self.access_lvalue(
+ self.access_place(
ContextKind::StorageDead.new(loc),
- (&root_lvalue, self.mir.source_info(borrow.location).span),
+ (&root_place, self.mir.source_info(borrow.location).span),
(Deep, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state
);
}
Place::Local(_) => {
- self.access_lvalue(
+ self.access_place(
ContextKind::StorageDead.new(loc),
- (&root_lvalue, self.mir.source_info(borrow.location).span),
+ (&root_place, self.mir.source_info(borrow.location).span),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state
Shallow(Option<ArtificialField>),
/// From the RFC: "A *deep* access means that all data reachable
- /// through the given lvalue may be invalidated or accesses by
+ /// through the given place may be invalidated or accessed by
/// this action."
Deep,
}
Move,
}
-/// When checking permissions for an lvalue access, this flag is used to indicate that an immutable
-/// local lvalue can be mutated.
+/// When checking permissions for a place access, this flag is used to indicate that an immutable
+/// local place can be mutated.
///
/// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
/// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
- /// Checks an access to the given lvalue to see if it is allowed. Examines the set of borrows
+ /// Checks an access to the given place to see if it is allowed. Examines the set of borrows
/// that are in scope, as well as which paths have been initialized, to ensure that (a) the
- /// lvalue is initialized and (b) it is not borrowed in some way that would prevent this
+ /// place is initialized and (b) it is not borrowed in some way that would prevent this
/// access.
///
/// Returns true if an error is reported, false otherwise.
- fn access_lvalue(&mut self,
+ fn access_place(&mut self,
context: Context,
- lvalue_span: (&Place<'tcx>, Span),
+ place_span: (&Place<'tcx>, Span),
kind: (ShallowOrDeep, ReadOrWrite),
is_local_mutation_allowed: LocalMutationIsAllowed,
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
let (sd, rw) = kind;
- let storage_dead_or_drop_local = match (lvalue_span.0, rw) {
+ let storage_dead_or_drop_local = match (place_span.0, rw) {
(&Place::Local(local), Write(WriteKind::StorageDeadOrDrop)) => Some(local),
_ => None
};
}
// Check permissions
- let mut error_reported = self.check_access_permissions(lvalue_span,
+ let mut error_reported = self.check_access_permissions(place_span,
rw,
is_local_mutation_allowed);
self.each_borrow_involving_path(
- context, (sd, lvalue_span.0), flow_state, |this, _index, borrow, common_prefix| {
+ context, (sd, place_span.0), flow_state, |this, _index, borrow, common_prefix| {
match (rw, borrow.kind) {
(Read(_), BorrowKind::Shared) => {
Control::Continue
ReadKind::Copy => {
error_reported = true;
this.report_use_while_mutably_borrowed(
- context, lvalue_span, borrow)
+ context, place_span, borrow)
},
ReadKind::Borrow(bk) => {
let end_issued_loan_span =
&borrow.region);
error_reported = true;
this.report_conflicting_borrow(
- context, common_prefix, lvalue_span, bk,
+ context, common_prefix, place_span, bk,
&borrow, end_issued_loan_span)
}
}
&borrow.region);
error_reported = true;
this.report_conflicting_borrow(
- context, common_prefix, lvalue_span, bk,
+ context, common_prefix, place_span, bk,
&borrow, end_issued_loan_span)
}
WriteKind::StorageDeadOrDrop => {
&borrow.region);
error_reported = true;
this.report_borrowed_value_does_not_live_long_enough(
- context, lvalue_span, end_span)
+ context, place_span, end_span)
},
WriteKind::Mutate => {
error_reported = true;
this.report_illegal_mutation_of_borrowed(
- context, lvalue_span, borrow)
+ context, place_span, borrow)
},
WriteKind::Move => {
error_reported = true;
this.report_move_out_while_borrowed(
- context, lvalue_span, &borrow)
+ context, place_span, &borrow)
},
}
Control::Break
}
}
- fn mutate_lvalue(&mut self,
+ fn mutate_place(&mut self,
context: Context,
- lvalue_span: (&Place<'tcx>, Span),
+ place_span: (&Place<'tcx>, Span),
kind: ShallowOrDeep,
mode: MutateMode,
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
match mode {
MutateMode::WriteAndRead => {
self.check_if_path_is_moved(context, InitializationRequiringAction::Update,
- lvalue_span, flow_state);
+ place_span, flow_state);
}
MutateMode::JustWrite => {
- self.check_if_assigned_path_is_moved(context, lvalue_span, flow_state);
+ self.check_if_assigned_path_is_moved(context, place_span, flow_state);
}
}
- self.access_lvalue(context,
- lvalue_span,
+ self.access_place(context,
+ place_span,
(kind, Write(WriteKind::Mutate)),
LocalMutationIsAllowed::Yes,
flow_state);
// check for reassignments to immutable local variables
- self.check_if_reassignment_to_immutable_state(context, lvalue_span, flow_state);
+ self.check_if_reassignment_to_immutable_state(context, place_span, flow_state);
}
fn consume_rvalue(&mut self,
_location: Location,
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
match *rvalue {
- Rvalue::Ref(_/*rgn*/, bk, ref lvalue) => {
+ Rvalue::Ref(_/*rgn*/, bk, ref place) => {
let access_kind = match bk {
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Unique |
BorrowKind::Mut => (Deep, Write(WriteKind::MutableBorrow(bk))),
};
- self.access_lvalue(context,
- (lvalue, span),
+ self.access_place(context,
+ (place, span),
access_kind,
LocalMutationIsAllowed::No,
flow_state);
self.check_if_path_is_moved(context, InitializationRequiringAction::Borrow,
- (lvalue, span), flow_state);
+ (place, span), flow_state);
}
Rvalue::Use(ref operand) |
self.consume_operand(context, (operand, span), flow_state)
}
- Rvalue::Len(ref lvalue) |
- Rvalue::Discriminant(ref lvalue) => {
+ Rvalue::Len(ref place) |
+ Rvalue::Discriminant(ref place) => {
let af = match *rvalue {
Rvalue::Len(..) => ArtificialField::ArrayLength,
Rvalue::Discriminant(..) => ArtificialField::Discriminant,
_ => unreachable!(),
};
- self.access_lvalue(context,
- (lvalue, span),
+ self.access_place(context,
+ (place, span),
(Shallow(Some(af)), Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state);
self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
- (lvalue, span), flow_state);
+ (place, span), flow_state);
}
Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2) |
(operand, span): (&Operand<'tcx>, Span),
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
match *operand {
- Operand::Copy(ref lvalue) => {
- // copy of lvalue: check if this is "copy of frozen path"
+ Operand::Copy(ref place) => {
+ // copy of place: check if this is "copy of frozen path"
// (FIXME: see check_loans.rs)
- self.access_lvalue(context,
- (lvalue, span),
+ self.access_place(context,
+ (place, span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state);
// Finally, check if path was already moved.
self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
- (lvalue, span), flow_state);
+ (place, span), flow_state);
}
- Operand::Move(ref lvalue) => {
- // move of lvalue: check if this is move of already borrowed path
- self.access_lvalue(context,
- (lvalue, span),
+ Operand::Move(ref place) => {
+ // move of place: check if this is move of already borrowed path
+ self.access_place(context,
+ (place, span),
(Deep, Write(WriteKind::Move)),
LocalMutationIsAllowed::Yes,
flow_state);
// Finally, check if path was already moved.
self.check_if_path_is_moved(context, InitializationRequiringAction::Use,
- (lvalue, span), flow_state);
+ (place, span), flow_state);
}
Operand::Constant(_) => {}
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
fn check_if_reassignment_to_immutable_state(&mut self,
context: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
let move_data = self.move_data;
// determine if this path has a non-mut owner (and thus needs checking).
- if let Ok(()) = self.is_mutable(lvalue, LocalMutationIsAllowed::No) {
+ if let Ok(()) = self.is_mutable(place, LocalMutationIsAllowed::No) {
return;
}
- if let Err(_) = self.is_mutable(lvalue, LocalMutationIsAllowed::Yes) {
+ if let Err(_) = self.is_mutable(place, LocalMutationIsAllowed::Yes) {
return;
}
- match self.move_path_closest_to(lvalue) {
+ match self.move_path_closest_to(place) {
Ok(mpi) => {
for ii in &move_data.init_path_map[mpi] {
if flow_state.ever_inits.curr_state.contains(ii) {
let first_assign_span = self.move_data.inits[*ii].span;
self.report_illegal_reassignment(
- context, (lvalue, span), first_assign_span);
+ context, (place, span), first_assign_span);
break;
}
}
},
Err(NoMovePathFound::ReachedStatic) => {
- let item_msg = match self.describe_lvalue(lvalue) {
+ let item_msg = match self.describe_place(place) {
Some(name) => format!("immutable static item `{}`", name),
None => "immutable static item".to_owned()
};
fn check_if_path_is_moved(&mut self,
context: Context,
desired_action: InitializationRequiringAction,
- lvalue_span: (&Place<'tcx>, Span),
+ place_span: (&Place<'tcx>, Span),
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
- // FIXME: analogous code in check_loans first maps `lvalue` to
+ // FIXME: analogous code in check_loans first maps `place` to
// its base_path ... but is that what we want here?
- let lvalue = self.base_path(lvalue_span.0);
+ let place = self.base_path(place_span.0);
let maybe_uninits = &flow_state.uninits;
let curr_move_outs = &flow_state.move_outs.curr_state;
// Therefore, if we seek out the *closest* prefix for which we
// have a MovePath, that should capture the initialization
- // state for the lvalue scenario.
+ // state for the place scenario.
//
// This code covers scenarios 1, 2, and 4.
- debug!("check_if_path_is_moved part1 lvalue: {:?}", lvalue);
- match self.move_path_closest_to(lvalue) {
+ debug!("check_if_path_is_moved part1 place: {:?}", place);
+ match self.move_path_closest_to(place) {
Ok(mpi) => {
if maybe_uninits.curr_state.contains(&mpi) {
self.report_use_of_moved_or_uninitialized(context, desired_action,
- lvalue_span, mpi,
+ place_span, mpi,
curr_move_outs);
return; // don't bother finding other problems.
}
// to do such a query based on partial-init feature-gate.)
}
- // A move of any shallow suffix of `lvalue` also interferes
- // with an attempt to use `lvalue`. This is scenario 3 above.
+ // A move of any shallow suffix of `place` also interferes
+ // with an attempt to use `place`. This is scenario 3 above.
//
// (Distinct from handling of scenarios 1+2+4 above because
- // `lvalue` does not interfere with suffixes of its prefixes,
+ // `place` does not interfere with suffixes of its prefixes,
// e.g. `a.b.c` does not interfere with `a.b.d`)
- debug!("check_if_path_is_moved part2 lvalue: {:?}", lvalue);
- if let Some(mpi) = self.move_path_for_lvalue(lvalue) {
+ debug!("check_if_path_is_moved part2 place: {:?}", place);
+ if let Some(mpi) = self.move_path_for_place(place) {
if let Some(child_mpi) = maybe_uninits.has_any_child_of(mpi) {
self.report_use_of_moved_or_uninitialized(context, desired_action,
- lvalue_span, child_mpi,
+ place_span, child_mpi,
curr_move_outs);
return; // don't bother finding other problems.
}
}
}
- /// Currently MoveData does not store entries for all lvalues in
+ /// Currently MoveData does not store entries for all places in
/// the input MIR. For example it will currently filter out
- /// lvalues that are Copy; thus we do not track lvalues of shared
- /// reference type. This routine will walk up an lvalue along its
- /// prefixes, searching for a foundational lvalue that *is*
+ /// places that are Copy; thus we do not track places of shared
+ /// reference type. This routine will walk up a place along its
+ /// prefixes, searching for a foundational place that *is*
/// tracked in the MoveData.
///
/// An Err result includes a tag indicated why the search failed.
- /// Currenly this can only occur if the lvalue is built off of a
+ /// Currently this can only occur if the place is built off of a
/// static variable, as we do not track those in the MoveData.
- fn move_path_closest_to(&mut self, lvalue: &Place<'tcx>)
+ fn move_path_closest_to(&mut self, place: &Place<'tcx>)
-> Result<MovePathIndex, NoMovePathFound>
{
- let mut last_prefix = lvalue;
- for prefix in self.prefixes(lvalue, PrefixSet::All) {
- if let Some(mpi) = self.move_path_for_lvalue(prefix) {
+ let mut last_prefix = place;
+ for prefix in self.prefixes(place, PrefixSet::All) {
+ if let Some(mpi) = self.move_path_for_place(prefix) {
return Ok(mpi);
}
last_prefix = prefix;
}
}
- fn move_path_for_lvalue(&mut self,
- lvalue: &Place<'tcx>)
+ fn move_path_for_place(&mut self,
+ place: &Place<'tcx>)
-> Option<MovePathIndex>
{
// If returns None, then there is no move path corresponding
- // to a direct owner of `lvalue` (which means there is nothing
+ // to a direct owner of `place` (which means there is nothing
// that borrowck tracks for its analysis).
- match self.move_data.rev_lookup.find(lvalue) {
+ match self.move_data.rev_lookup.find(place) {
LookupResult::Parent(_) => None,
LookupResult::Exact(mpi) => Some(mpi),
}
fn check_if_assigned_path_is_moved(&mut self,
context: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
flow_state: &InProgress<'cx, 'gcx, 'tcx>) {
- // recur down lvalue; dispatch to check_if_path_is_moved when necessary
- let mut lvalue = lvalue;
+ // recur down place; dispatch to check_if_path_is_moved when necessary
+ let mut place = place;
loop {
- match *lvalue {
+ match *place {
Place::Local(_) | Place::Static(_) => {
// assigning to `x` does not require `x` be initialized.
break;
}
}
- lvalue = base;
+ place = base;
continue;
}
}
}
}
- /// Check the permissions for the given lvalue and read or write kind
+ /// Check the permissions for the given place and read or write kind
///
/// Returns true if an error is reported, false otherwise.
fn check_access_permissions(&self,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
kind: ReadOrWrite,
is_local_mutation_allowed: LocalMutationIsAllowed)
-> bool {
debug!("check_access_permissions({:?}, {:?}, {:?})",
- lvalue, kind, is_local_mutation_allowed);
+ place, kind, is_local_mutation_allowed);
let mut error_reported = false;
match kind {
Write(WriteKind::MutableBorrow(BorrowKind::Unique)) => {
- if let Err(_lvalue_err) = self.is_unique(lvalue) {
- span_bug!(span, "&unique borrow for {:?} should not fail", lvalue);
+ if let Err(_place_err) = self.is_unique(place) {
+ span_bug!(span, "&unique borrow for {:?} should not fail", place);
}
},
Write(WriteKind::MutableBorrow(BorrowKind::Mut)) => {
- if let Err(lvalue_err) = self.is_mutable(lvalue, is_local_mutation_allowed) {
+ if let Err(place_err) = self.is_mutable(place, is_local_mutation_allowed) {
error_reported = true;
- let item_msg = match self.describe_lvalue(lvalue) {
+ let item_msg = match self.describe_place(place) {
Some(name) => format!("immutable item `{}`", name),
None => "immutable item".to_owned()
};
Origin::Mir);
err.span_label(span, "cannot borrow as mutable");
- if lvalue != lvalue_err {
- if let Some(name) = self.describe_lvalue(lvalue_err) {
+ if place != place_err {
+ if let Some(name) = self.describe_place(place_err) {
err.note(&format!("Value not mutable causing this error: `{}`", name));
}
}
}
},
Write(WriteKind::Mutate) => {
- if let Err(lvalue_err) = self.is_mutable(lvalue, is_local_mutation_allowed) {
+ if let Err(place_err) = self.is_mutable(place, is_local_mutation_allowed) {
error_reported = true;
- let item_msg = match self.describe_lvalue(lvalue) {
+ let item_msg = match self.describe_place(place) {
Some(name) => format!("immutable item `{}`", name),
None => "immutable item".to_owned()
};
Origin::Mir);
err.span_label(span, "cannot mutate");
- if lvalue != lvalue_err {
- if let Some(name) = self.describe_lvalue(lvalue_err) {
+ if place != place_err {
+ if let Some(name) = self.describe_place(place_err) {
err.note(&format!("Value not mutable causing this error: `{}`", name));
}
}
Write(WriteKind::Move) |
Write(WriteKind::StorageDeadOrDrop) |
Write(WriteKind::MutableBorrow(BorrowKind::Shared)) => {
- if let Err(_lvalue_err) = self.is_mutable(lvalue, is_local_mutation_allowed) {
+ if let Err(_place_err) = self.is_mutable(place, is_local_mutation_allowed) {
self.tcx.sess.delay_span_bug(span,
&format!("Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
- lvalue,
+ place,
kind));
}
},
/// Can this value be written or borrowed mutably
fn is_mutable<'d>(&self,
- lvalue: &'d Place<'tcx>,
+ place: &'d Place<'tcx>,
is_local_mutation_allowed: LocalMutationIsAllowed)
-> Result<(), &'d Place<'tcx>> {
- match *lvalue {
+ match *place {
Place::Local(local) => {
let local = &self.mir.local_decls[local];
match local.mutability {
Mutability::Not =>
match is_local_mutation_allowed {
LocalMutationIsAllowed::Yes => Ok(()),
- LocalMutationIsAllowed::No => Err(lvalue),
+ LocalMutationIsAllowed::No => Err(place),
},
Mutability::Mut => Ok(())
}
},
Place::Static(ref static_) => {
if !self.tcx.is_static_mut(static_.def_id) {
- Err(lvalue)
+ Err(place)
} else {
Ok(())
}
ty::TyRef(_, tnm) => {
match tnm.mutbl {
// Shared borrowed data is never mutable
- hir::MutImmutable => Err(lvalue),
+ hir::MutImmutable => Err(place),
// Mutably borrowed data is mutable, but only if we have a
// unique path to the `&mut`
hir::MutMutable => {
ty::TyRawPtr(tnm) => {
match tnm.mutbl {
// `*const` raw pointers are not mutable
- hir::MutImmutable => Err(lvalue),
+ hir::MutImmutable => Err(place),
// `*mut` raw pointers are always mutable, regardless of context
// The users have to check by themselve.
hir::MutMutable => Ok(()),
ProjectionElem::ConstantIndex{..} |
ProjectionElem::Subslice{..} |
ProjectionElem::Downcast(..) => {
- let field_projection = self.is_upvar_field_projection(lvalue);
+ let field_projection = self.is_upvar_field_projection(place);
if let Some(field) = field_projection {
let decl = &self.mir.upvar_decls[field.index()];
return match decl.mutability {
Mutability::Mut => self.is_unique(&proj.base),
- Mutability::Not => Err(lvalue),
+ Mutability::Not => Err(place),
};
}
}
}
- /// Does this lvalue have a unique path
- fn is_unique<'d>(&self, lvalue: &'d Place<'tcx>) -> Result<(), &'d Place<'tcx>> {
- match *lvalue {
+ /// Does this place have a unique path
+ fn is_unique<'d>(&self, place: &'d Place<'tcx>) -> Result<(), &'d Place<'tcx>> {
+ match *place {
Place::Local(..) => {
// Local variables are unique
Ok(())
},
Place::Static(..) => {
// Static variables are not
- Err(lvalue)
+ Err(place)
},
Place::Projection(ref proj) => {
match proj.elem {
match base_ty.sty {
ty::TyRef(_, tnm) => {
match tnm.mutbl {
- // lvalue represent an aliased location
- hir::MutImmutable => Err(lvalue),
+ // place represents an aliased location
+ hir::MutImmutable => Err(place),
// `&mut T` is as unique as the context in which it is found
hir::MutMutable => self.is_unique(&proj.base),
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
fn each_borrow_involving_path<F>(&mut self,
_context: Context,
- access_lvalue: (ShallowOrDeep, &Place<'tcx>),
+ access_place: (ShallowOrDeep, &Place<'tcx>),
flow_state: &InProgress<'cx, 'gcx, 'tcx>,
mut op: F)
where F: FnMut(&mut Self, BorrowIndex, &BorrowData<'tcx>, &Place<'tcx>) -> Control
{
- let (access, lvalue) = access_lvalue;
+ let (access, place) = access_place;
- // FIXME: analogous code in check_loans first maps `lvalue` to
+ // FIXME: analogous code in check_loans first maps `place` to
// its base_path.
let domain = flow_state.borrows.base_results.operator();
'next_borrow: for i in flow_state.borrows.elems_incoming() {
let borrowed = &data[i];
- // Is `lvalue` (or a prefix of it) already borrowed? If
+ // Is `place` (or a prefix of it) already borrowed? If
// so, that's relevant.
//
// FIXME: Differs from AST-borrowck; includes drive-by fix
// to #38899. Will probably need back-compat mode flag.
- for accessed_prefix in self.prefixes(lvalue, PrefixSet::All) {
- if *accessed_prefix == borrowed.lvalue {
+ for accessed_prefix in self.prefixes(place, PrefixSet::All) {
+ if *accessed_prefix == borrowed.place {
// FIXME: pass in enum describing case we are in?
let ctrl = op(self, i, borrowed, accessed_prefix);
if ctrl == Control::Break { return; }
}
}
- // Is `lvalue` a prefix (modulo access type) of the
- // `borrowed.lvalue`? If so, that's relevant.
+ // Is `place` a prefix (modulo access type) of the
+ // `borrowed.place`? If so, that's relevant.
let prefix_kind = match access {
Shallow(Some(ArtificialField::Discriminant)) |
// additional fields on the type; they do not
// overlap any existing data there. Furthermore,
// they cannot actually be a prefix of any
- // borrowed lvalue (at least in MIR as it is
+ // borrowed place (at least in MIR as it is
// currently.)
continue 'next_borrow;
}
Deep => PrefixSet::Supporting,
};
- for borrowed_prefix in self.prefixes(&borrowed.lvalue, prefix_kind) {
- if borrowed_prefix == lvalue {
+ for borrowed_prefix in self.prefixes(&borrowed.place, prefix_kind) {
+ if borrowed_prefix == place {
// FIXME: pass in enum describing case we are in?
let ctrl = op(self, i, borrowed, borrowed_prefix);
if ctrl == Control::Break { return; }
use self::prefixes::PrefixSet;
/// From the NLL RFC: "The deep [aka 'supporting'] prefixes for an
-/// lvalue are formed by stripping away fields and derefs, except that
+/// place are formed by stripping away fields and derefs, except that
/// we stop when we reach the deref of a shared reference. [...] "
///
/// "Shallow prefixes are found by stripping away fields, but stop at
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
- /// Returns an iterator over the prefixes of `lvalue`
+ /// Returns an iterator over the prefixes of `place`
/// (inclusive) from longest to smallest, potentially
/// terminating the iteration early based on `kind`.
pub(super) fn prefixes(&self,
- lvalue: &'cx Place<'tcx>,
+ place: &'cx Place<'tcx>,
kind: PrefixSet)
-> Prefixes<'cx, 'gcx, 'tcx>
{
- Prefixes { next: Some(lvalue), kind, mir: self.mir, tcx: self.tcx }
+ Prefixes { next: Some(place), kind, mir: self.mir, tcx: self.tcx }
}
}
fn next(&mut self) -> Option<Self::Item> {
let mut cursor = match self.next {
None => return None,
- Some(lvalue) => lvalue,
+ Some(place) => place,
};
- // Post-processing `lvalue`: Enqueue any remaining
- // work. Also, `lvalue` may not be a prefix itself, but
+ // Post-processing `place`: Enqueue any remaining
+ // work. Also, `place` may not be a prefix itself, but
// may hold one further down (e.g. we never return
// downcasts here, but may return a base of a downcast).
fn report_use_of_moved_or_uninitialized(&mut self,
_context: Context,
desired_action: InitializationRequiringAction,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
mpi: MovePathIndex,
curr_move_out: &IdxSetBuf<MoveOutIndex>) {
|moi| curr_move_out.contains(moi)).collect::<Vec<_>>();
if mois.is_empty() {
- let item_msg = match self.describe_lvalue(lvalue) {
+ let item_msg = match self.describe_place(place) {
Some(name) => format!("`{}`", name),
None => "value".to_owned()
};
self.tcx.cannot_act_on_uninitialized_variable(span,
desired_action.as_noun(),
- &self.describe_lvalue(lvalue)
+ &self.describe_place(place)
.unwrap_or("_".to_owned()),
Origin::Mir)
.span_label(span, format!("use of possibly uninitialized {}", item_msg))
let mut err = self.tcx.cannot_act_on_moved_value(span,
desired_action.as_noun(),
msg,
- &self.describe_lvalue(lvalue)
+ &self.describe_place(place)
.unwrap_or("_".to_owned()),
Origin::Mir);
fn report_move_out_while_borrowed(&mut self,
_context: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
borrow: &BorrowData<'tcx>) {
- let value_msg = match self.describe_lvalue(lvalue) {
+ let value_msg = match self.describe_place(place) {
Some(name) => format!("`{}`", name),
None => "value".to_owned()
};
- let borrow_msg = match self.describe_lvalue(&borrow.lvalue) {
+ let borrow_msg = match self.describe_place(&borrow.place) {
Some(name) => format!("`{}`", name),
None => "value".to_owned()
};
self.tcx.cannot_move_when_borrowed(span,
- &self.describe_lvalue(lvalue).unwrap_or("_".to_owned()),
+ &self.describe_place(place).unwrap_or("_".to_owned()),
Origin::Mir)
.span_label(self.retrieve_borrow_span(borrow),
format!("borrow of {} occurs here", borrow_msg))
fn report_use_while_mutably_borrowed(&mut self,
_context: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
borrow : &BorrowData<'tcx>) {
let mut err = self.tcx.cannot_use_when_mutably_borrowed(
span,
- &self.describe_lvalue(lvalue).unwrap_or("_".to_owned()),
+ &self.describe_place(place).unwrap_or("_".to_owned()),
self.retrieve_borrow_span(borrow),
- &self.describe_lvalue(&borrow.lvalue).unwrap_or("_".to_owned()),
+ &self.describe_place(&borrow.place).unwrap_or("_".to_owned()),
Origin::Mir);
err.emit();
break;
}
- if let StatementKind::Assign(_, Rvalue::Aggregate(ref kind, ref lvs)) = stmt.kind {
+ if let StatementKind::Assign(_, Rvalue::Aggregate(ref kind, ref places)) = stmt.kind {
if let AggregateKind::Closure(def_id, _) = **kind {
- debug!("find_closure_span: found closure {:?}", lvs);
+ debug!("find_closure_span: found closure {:?}", places);
return if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) {
let args_span = if let ExprClosure(_, _, _, span, _) =
};
self.tcx.with_freevars(node_id, |freevars| {
- for (v, lv) in freevars.iter().zip(lvs) {
- match *lv {
+ for (v, place) in freevars.iter().zip(places) {
+ match *place {
Operand::Copy(Place::Local(l)) |
Operand::Move(Place::Local(l)) if local == l => {
debug!(
fn report_conflicting_borrow(&mut self,
context: Context,
common_prefix: &Place<'tcx>,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
gen_borrow_kind: BorrowKind,
issued_borrow: &BorrowData,
end_issued_loan_span: Option<Span>) {
use self::prefixes::IsPrefixOf;
- assert!(common_prefix.is_prefix_of(lvalue));
- assert!(common_prefix.is_prefix_of(&issued_borrow.lvalue));
+ assert!(common_prefix.is_prefix_of(place));
+ assert!(common_prefix.is_prefix_of(&issued_borrow.place));
let issued_span = self.retrieve_borrow_span(issued_borrow);
let old_closure_span = self.find_closure_span(issued_span, issued_borrow.location);
let issued_span = old_closure_span.map(|(args, _)| args).unwrap_or(issued_span);
- let desc_lvalue = self.describe_lvalue(lvalue).unwrap_or("_".to_owned());
+ let desc_place = self.describe_place(place).unwrap_or("_".to_owned());
// FIXME: supply non-"" `opt_via` when appropriate
let mut err = match (gen_borrow_kind, "immutable", "mutable",
(BorrowKind::Shared, lft, _, BorrowKind::Mut, _, rgt) |
(BorrowKind::Mut, _, lft, BorrowKind::Shared, rgt, _) =>
self.tcx.cannot_reborrow_already_borrowed(
- span, &desc_lvalue, "", lft, issued_span,
+ span, &desc_place, "", lft, issued_span,
"it", rgt, "", end_issued_loan_span, Origin::Mir),
(BorrowKind::Mut, _, _, BorrowKind::Mut, _, _) =>
self.tcx.cannot_mutably_borrow_multiply(
- span, &desc_lvalue, "", issued_span,
+ span, &desc_place, "", issued_span,
"", end_issued_loan_span, Origin::Mir),
(BorrowKind::Unique, _, _, BorrowKind::Unique, _, _) =>
self.tcx.cannot_uniquely_borrow_by_two_closures(
- span, &desc_lvalue, issued_span,
+ span, &desc_place, issued_span,
end_issued_loan_span, Origin::Mir),
(BorrowKind::Unique, _, _, _, _, _) =>
self.tcx.cannot_uniquely_borrow_by_one_closure(
- span, &desc_lvalue, "",
+ span, &desc_place, "",
issued_span, "it", "", end_issued_loan_span, Origin::Mir),
(_, _, _, BorrowKind::Unique, _, _) =>
self.tcx.cannot_reborrow_already_uniquely_borrowed(
- span, &desc_lvalue, "it", "",
+ span, &desc_place, "it", "",
issued_span, "", end_issued_loan_span, Origin::Mir),
(BorrowKind::Shared, _, _, BorrowKind::Shared, _, _) =>
if let Some((_, var_span)) = old_closure_span {
err.span_label(
var_span,
- format!("previous borrow occurs due to use of `{}` in closure", desc_lvalue),
+ format!("previous borrow occurs due to use of `{}` in closure", desc_place),
);
}
if let Some((_, var_span)) = new_closure_span {
err.span_label(
var_span,
- format!("borrow occurs due to use of `{}` in closure", desc_lvalue),
+ format!("borrow occurs due to use of `{}` in closure", desc_place),
);
}
fn report_borrowed_value_does_not_live_long_enough(&mut self,
_: Context,
- (lvalue, span): (&Place, Span),
+ (place, span): (&Place, Span),
end_span: Option<Span>) {
- let proper_span = match *lvalue {
+ let proper_span = match *place {
Place::Local(local) => self.mir.local_decls[local].source_info.span,
_ => span
};
fn report_illegal_mutation_of_borrowed(&mut self,
_: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
loan: &BorrowData) {
let mut err = self.tcx.cannot_assign_to_borrowed(
span,
self.retrieve_borrow_span(loan),
- &self.describe_lvalue(lvalue).unwrap_or("_".to_owned()),
+ &self.describe_place(place).unwrap_or("_".to_owned()),
Origin::Mir);
err.emit();
fn report_illegal_reassignment(&mut self,
_context: Context,
- (lvalue, span): (&Place<'tcx>, Span),
+ (place, span): (&Place<'tcx>, Span),
assigned_span: Span) {
let mut err = self.tcx.cannot_reassign_immutable(span,
- &self.describe_lvalue(lvalue).unwrap_or("_".to_owned()),
+ &self.describe_place(place).unwrap_or("_".to_owned()),
Origin::Mir);
err.span_label(span, "cannot assign twice to immutable variable");
if span != assigned_span {
- let value_msg = match self.describe_lvalue(lvalue) {
+ let value_msg = match self.describe_place(place) {
Some(name) => format!("`{}`", name),
None => "value".to_owned()
};
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
- // End-user visible description of `lvalue` if one can be found. If the
- // lvalue is a temporary for instance, None will be returned.
- fn describe_lvalue(&self, lvalue: &Place<'tcx>) -> Option<String> {
+ // End-user visible description of `place` if one can be found. If the
+ // place is a temporary for instance, None will be returned.
+ fn describe_place(&self, place: &Place<'tcx>) -> Option<String> {
let mut buf = String::new();
- match self.append_lvalue_to_string(lvalue, &mut buf, false) {
+ match self.append_place_to_string(place, &mut buf, false) {
Ok(()) => Some(buf),
Err(()) => None
}
/// then returns the index of the field being projected. Note that this closure will always
/// be `self` in the current MIR, because that is the only time we directly access the fields
/// of a closure type.
- fn is_upvar_field_projection(&self, lvalue: &Place<'tcx>) -> Option<Field> {
- match *lvalue {
+ fn is_upvar_field_projection(&self, place: &Place<'tcx>) -> Option<Field> {
+ match *place {
Place::Projection(ref proj) => {
match proj.elem {
ProjectionElem::Field(field, _ty) => {
}
}
- // Appends end-user visible description of `lvalue` to `buf`.
- fn append_lvalue_to_string(&self,
- lvalue: &Place<'tcx>,
+ // Appends end-user visible description of `place` to `buf`.
+ fn append_place_to_string(&self,
+ place: &Place<'tcx>,
buf: &mut String,
mut autoderef: bool) -> Result<(), ()> {
- match *lvalue {
+ match *place {
Place::Local(local) => {
self.append_local_to_string(local, buf,)?;
}
}
} else {
if autoderef {
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
} else {
buf.push_str(&"*");
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
}
}
},
ProjectionElem::Downcast(..) => {
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
},
ProjectionElem::Field(field, _ty) => {
autoderef = true;
- if let Some(field) = self.is_upvar_field_projection(lvalue) {
+ if let Some(field) = self.is_upvar_field_projection(place) {
let var_index = field.index();
let name = self.mir.upvar_decls[var_index].debug_name.to_string();
buf.push_str(&name);
} else {
let field_name = self.describe_field(&proj.base, field);
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
buf.push_str(&format!(".{}", field_name));
}
},
ProjectionElem::Index(index) => {
autoderef = true;
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
buf.push_str("[");
if let Err(_) = self.append_local_to_string(index, buf) {
buf.push_str("..");
// Since it isn't possible to borrow an element on a particular index and
// then use another while the borrow is held, don't output indices details
// to avoid confusing the end-user
- self.append_lvalue_to_string(&proj.base, buf, autoderef)?;
+ self.append_place_to_string(&proj.base, buf, autoderef)?;
buf.push_str(&"[..]");
},
};
Ok(())
}
- // Appends end-user visible description of the `local` lvalue to `buf`. If `local` doesn't have
+ // Appends end-user visible description of the `local` place to `buf`. If `local` doesn't have
// a name, then `Err` is returned
fn append_local_to_string(&self, local_index: Local, buf: &mut String) -> Result<(), ()> {
let local = &self.mir.local_decls[local_index];
// moves out of a Box. They should be removed when/if we stop
// treating Box specially (e.g. when/if DerefMove is added...)
- fn base_path<'d>(&self, lvalue: &'d Place<'tcx>) -> &'d Place<'tcx> {
+ fn base_path<'d>(&self, place: &'d Place<'tcx>) -> &'d Place<'tcx> {
//! Returns the base of the leftmost (deepest) dereference of an
- //! Box in `lvalue`. If there is no dereference of an Box
- //! in `lvalue`, then it just returns `lvalue` itself.
+        //! Box in `place`. If there is no dereference of a Box
+ //! in `place`, then it just returns `place` itself.
- let mut cursor = lvalue;
- let mut deepest = lvalue;
+ let mut cursor = place;
+ let mut deepest = place;
loop {
let proj = match *cursor {
Place::Local(..) | Place::Static(..) => return deepest,
Place::Projection(ref proj) => proj,
};
if proj.elem == ProjectionElem::Deref &&
- lvalue.ty(self.mir, self.tcx).to_ty(self.tcx).is_box()
+ place.ty(self.mir, self.tcx).to_ty(self.tcx).is_box()
{
deepest = &proj.base;
}
pub fn push_assign(&mut self,
block: BasicBlock,
source_info: SourceInfo,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
rvalue: Rvalue<'tcx>) {
self.push(block, Statement {
source_info,
- kind: StatementKind::Assign(lvalue.clone(), rvalue)
+ kind: StatementKind::Assign(place.clone(), rvalue)
});
}
pub fn push_assign_unit(&mut self,
block: BasicBlock,
source_info: SourceInfo,
- lvalue: &Place<'tcx>) {
- self.push_assign(block, source_info, lvalue, Rvalue::Aggregate(
+ place: &Place<'tcx>) {
+ self.push_assign(block, source_info, place, Rvalue::Aggregate(
box AggregateKind::Tuple, vec![]
));
}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! See docs in build/expr/mod.rs
-
-use build::{BlockAnd, BlockAndExtension, Builder};
-use build::expr::category::Category;
-use hair::*;
-use rustc::mir::*;
-
-use rustc_data_structures::indexed_vec::Idx;
-
-impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
- /// Compile `expr`, yielding an lvalue that we can move from etc.
- pub fn as_lvalue<M>(&mut self,
- block: BasicBlock,
- expr: M)
- -> BlockAnd<Place<'tcx>>
- where M: Mirror<'tcx, Output=Expr<'tcx>>
- {
- let expr = self.hir.mirror(expr);
- self.expr_as_lvalue(block, expr)
- }
-
- fn expr_as_lvalue(&mut self,
- mut block: BasicBlock,
- expr: Expr<'tcx>)
- -> BlockAnd<Place<'tcx>> {
- debug!("expr_as_lvalue(block={:?}, expr={:?})", block, expr);
-
- let this = self;
- let expr_span = expr.span;
- let source_info = this.source_info(expr_span);
- match expr.kind {
- ExprKind::Scope { region_scope, lint_level, value } => {
- this.in_scope((region_scope, source_info), lint_level, block, |this| {
- this.as_lvalue(block, value)
- })
- }
- ExprKind::Field { lhs, name } => {
- let lvalue = unpack!(block = this.as_lvalue(block, lhs));
- let lvalue = lvalue.field(name, expr.ty);
- block.and(lvalue)
- }
- ExprKind::Deref { arg } => {
- let lvalue = unpack!(block = this.as_lvalue(block, arg));
- let lvalue = lvalue.deref();
- block.and(lvalue)
- }
- ExprKind::Index { lhs, index } => {
- let (usize_ty, bool_ty) = (this.hir.usize_ty(), this.hir.bool_ty());
-
- let slice = unpack!(block = this.as_lvalue(block, lhs));
- // region_scope=None so lvalue indexes live forever. They are scalars so they
- // do not need storage annotations, and they are often copied between
- // places.
- let idx = unpack!(block = this.as_temp(block, None, index));
-
- // bounds check:
- let (len, lt) = (this.temp(usize_ty.clone(), expr_span),
- this.temp(bool_ty, expr_span));
- this.cfg.push_assign(block, source_info, // len = len(slice)
- &len, Rvalue::Len(slice.clone()));
- this.cfg.push_assign(block, source_info, // lt = idx < len
-                                 &lt, Rvalue::BinaryOp(BinOp::Lt,
- Operand::Copy(Place::Local(idx)),
- Operand::Copy(len.clone())));
-
- let msg = AssertMessage::BoundsCheck {
- len: Operand::Move(len),
- index: Operand::Copy(Place::Local(idx))
- };
- let success = this.assert(block, Operand::Move(lt), true,
- msg, expr_span);
- success.and(slice.index(idx))
- }
- ExprKind::SelfRef => {
- block.and(Place::Local(Local::new(1)))
- }
- ExprKind::VarRef { id } => {
- let index = this.var_indices[&id];
- block.and(Place::Local(index))
- }
- ExprKind::StaticRef { id } => {
- block.and(Place::Static(Box::new(Static { def_id: id, ty: expr.ty })))
- }
-
- ExprKind::Array { .. } |
- ExprKind::Tuple { .. } |
- ExprKind::Adt { .. } |
- ExprKind::Closure { .. } |
- ExprKind::Unary { .. } |
- ExprKind::Binary { .. } |
- ExprKind::LogicalOp { .. } |
- ExprKind::Box { .. } |
- ExprKind::Cast { .. } |
- ExprKind::Use { .. } |
- ExprKind::NeverToAny { .. } |
- ExprKind::ReifyFnPointer { .. } |
- ExprKind::ClosureFnPointer { .. } |
- ExprKind::UnsafeFnPointer { .. } |
- ExprKind::Unsize { .. } |
- ExprKind::Repeat { .. } |
- ExprKind::Borrow { .. } |
- ExprKind::If { .. } |
- ExprKind::Match { .. } |
- ExprKind::Loop { .. } |
- ExprKind::Block { .. } |
- ExprKind::Assign { .. } |
- ExprKind::AssignOp { .. } |
- ExprKind::Break { .. } |
- ExprKind::Continue { .. } |
- ExprKind::Return { .. } |
- ExprKind::Literal { .. } |
- ExprKind::InlineAsm { .. } |
- ExprKind::Yield { .. } |
- ExprKind::Call { .. } => {
- // these are not lvalues, so we need to make a temporary.
- debug_assert!(match Category::of(&expr.kind) {
- Some(Category::Place) => false,
- _ => true,
- });
- let temp = unpack!(block = this.as_temp(block, expr.temp_lifetime, expr));
- block.and(Place::Local(temp))
- }
- }
- }
-}
}
/// Compile `expr` into a value that can be used as an operand.
- /// If `expr` is an lvalue like `x`, this will introduce a
+ /// If `expr` is a place like `x`, this will introduce a
/// temporary `tmp = x`, so that we capture the value of `x` at
/// this time.
///
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! See docs in build/expr/mod.rs
+
+use build::{BlockAnd, BlockAndExtension, Builder};
+use build::expr::category::Category;
+use hair::*;
+use rustc::mir::*;
+
+use rustc_data_structures::indexed_vec::Idx;
+
+impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
+ /// Compile `expr`, yielding a place that we can move from etc.
+ pub fn as_place<M>(&mut self,
+ block: BasicBlock,
+ expr: M)
+ -> BlockAnd<Place<'tcx>>
+ where M: Mirror<'tcx, Output=Expr<'tcx>>
+ {
+ let expr = self.hir.mirror(expr);
+ self.expr_as_place(block, expr)
+ }
+
+ fn expr_as_place(&mut self,
+ mut block: BasicBlock,
+ expr: Expr<'tcx>)
+ -> BlockAnd<Place<'tcx>> {
+ debug!("expr_as_place(block={:?}, expr={:?})", block, expr);
+
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+ match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ this.in_scope((region_scope, source_info), lint_level, block, |this| {
+ this.as_place(block, value)
+ })
+ }
+ ExprKind::Field { lhs, name } => {
+ let place = unpack!(block = this.as_place(block, lhs));
+ let place = place.field(name, expr.ty);
+ block.and(place)
+ }
+ ExprKind::Deref { arg } => {
+ let place = unpack!(block = this.as_place(block, arg));
+ let place = place.deref();
+ block.and(place)
+ }
+ ExprKind::Index { lhs, index } => {
+ let (usize_ty, bool_ty) = (this.hir.usize_ty(), this.hir.bool_ty());
+
+ let slice = unpack!(block = this.as_place(block, lhs));
+ // region_scope=None so place indexes live forever. They are scalars so they
+ // do not need storage annotations, and they are often copied between
+ // places.
+ let idx = unpack!(block = this.as_temp(block, None, index));
+
+ // bounds check:
+ let (len, lt) = (this.temp(usize_ty.clone(), expr_span),
+ this.temp(bool_ty, expr_span));
+ this.cfg.push_assign(block, source_info, // len = len(slice)
+ &len, Rvalue::Len(slice.clone()));
+ this.cfg.push_assign(block, source_info, // lt = idx < len
+                                 &lt, Rvalue::BinaryOp(BinOp::Lt,
+ Operand::Copy(Place::Local(idx)),
+ Operand::Copy(len.clone())));
+
+ let msg = AssertMessage::BoundsCheck {
+ len: Operand::Move(len),
+ index: Operand::Copy(Place::Local(idx))
+ };
+ let success = this.assert(block, Operand::Move(lt), true,
+ msg, expr_span);
+ success.and(slice.index(idx))
+ }
+ ExprKind::SelfRef => {
+ block.and(Place::Local(Local::new(1)))
+ }
+ ExprKind::VarRef { id } => {
+ let index = this.var_indices[&id];
+ block.and(Place::Local(index))
+ }
+ ExprKind::StaticRef { id } => {
+ block.and(Place::Static(Box::new(Static { def_id: id, ty: expr.ty })))
+ }
+
+ ExprKind::Array { .. } |
+ ExprKind::Tuple { .. } |
+ ExprKind::Adt { .. } |
+ ExprKind::Closure { .. } |
+ ExprKind::Unary { .. } |
+ ExprKind::Binary { .. } |
+ ExprKind::LogicalOp { .. } |
+ ExprKind::Box { .. } |
+ ExprKind::Cast { .. } |
+ ExprKind::Use { .. } |
+ ExprKind::NeverToAny { .. } |
+ ExprKind::ReifyFnPointer { .. } |
+ ExprKind::ClosureFnPointer { .. } |
+ ExprKind::UnsafeFnPointer { .. } |
+ ExprKind::Unsize { .. } |
+ ExprKind::Repeat { .. } |
+ ExprKind::Borrow { .. } |
+ ExprKind::If { .. } |
+ ExprKind::Match { .. } |
+ ExprKind::Loop { .. } |
+ ExprKind::Block { .. } |
+ ExprKind::Assign { .. } |
+ ExprKind::AssignOp { .. } |
+ ExprKind::Break { .. } |
+ ExprKind::Continue { .. } |
+ ExprKind::Return { .. } |
+ ExprKind::Literal { .. } |
+ ExprKind::InlineAsm { .. } |
+ ExprKind::Yield { .. } |
+ ExprKind::Call { .. } => {
+ // these are not places, so we need to make a temporary.
+ debug_assert!(match Category::of(&expr.kind) {
+ Some(Category::Place) => false,
+ _ => true,
+ });
+ let temp = unpack!(block = this.as_temp(block, expr.temp_lifetime, expr));
+ block.and(Place::Local(temp))
+ }
+ }
+ }
+}
block.and(Rvalue::Repeat(value_operand, count))
}
ExprKind::Borrow { region, borrow_kind, arg } => {
- let arg_lvalue = unpack!(block = this.as_lvalue(block, arg));
- block.and(Rvalue::Ref(region, borrow_kind, arg_lvalue))
+ let arg_place = unpack!(block = this.as_place(block, arg));
+ block.and(Rvalue::Ref(region, borrow_kind, arg_place))
}
ExprKind::Binary { op, lhs, rhs } => {
let lhs = unpack!(block = this.as_operand(block, scope, lhs));
let field_names = this.hir.all_fields(adt_def, variant_index);
let fields = if let Some(FruInfo { base, field_types }) = base {
- let base = unpack!(block = this.as_lvalue(block, base));
+ let base = unpack!(block = this.as_place(block, base));
// MIR does not natively support FRU, so for each
// base-supplied field, generate an operand that
}
// Careful here not to cause an infinite cycle. If we always
- // called `into`, then for lvalues like `x.f`, it would
+ // called `into`, then for places like `x.f`, it would
// eventually fallback to us, and we'd loop. There's a reason
// for this: `as_temp` is the point where we bridge the "by
- // reference" semantics of `as_lvalue` with the "by value"
+ // reference" semantics of `as_place` with the "by value"
// semantics of `into`, `as_operand`, `as_rvalue`, and (of
// course) `as_temp`.
match Category::of(&expr.kind).unwrap() {
Category::Place => {
- let lvalue = unpack!(block = this.as_lvalue(block, expr));
- let rvalue = Rvalue::Use(this.consume_by_copy_or_move(lvalue));
+ let place = unpack!(block = this.as_place(block, expr));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
this.cfg.push_assign(block, source_info, &Place::Local(temp), rvalue);
}
_ => {
//! - `as_operand` -- evaluates the value and yields an `Operand`,
//! suitable for use as an argument to an `Rvalue`
//! - `as_temp` -- evaluates into a temporary; this is similar to `as_operand`
-//! except it always returns a fresh lvalue, even for constants
+//! except it always returns a fresh place, even for constants
//! - `as_rvalue` -- yields an `Rvalue`, suitable for use in an assignment;
//! as of this writing, never needed outside of the `expr` module itself
//!
//! Sometimes though want the expression's *location*. An example
//! would be during a match statement, or the operand of the `&`
-//! operator. In that case, you want `as_lvalue`. This will create a
+//! operator. In that case, you want `as_place`. This will create a
//! temporary if necessary.
//!
//! Finally, if it's a constant you seek, then call
//! struct expression (or other expression that creates a new value)
//! is typically easiest to write in terms of `as_rvalue` or `into`,
//! whereas a reference to a field is easiest to write in terms of
-//! `as_lvalue`. (The exception to this is scope and paren
+//! `as_place`. (The exception to this is scope and paren
//! expressions, which have no category.)
//!
//! Therefore, the various functions above make use of one another in
//! the most suitable spot to implement it, and then just let the
//! other fns cycle around. The handoff works like this:
//!
-//! - `into(lv)` -> fallback is to create a rvalue with `as_rvalue` and assign it to `lv`
+//! - `into(place)` -> fallback is to create a rvalue with `as_rvalue` and assign it to `place`
//! - `as_rvalue` -> fallback is to create an Operand with `as_operand` and use `Rvalue::use`
//! - `as_operand` -> either invokes `as_constant` or `as_temp`
//! - `as_constant` -> (no fallback)
-//! - `as_temp` -> creates a temporary and either calls `as_lvalue` or `into`
-//! - `as_lvalue` -> for rvalues, falls back to `as_temp` and returns that
+//! - `as_temp` -> creates a temporary and either calls `as_place` or `into`
+//! - `as_place` -> for rvalues, falls back to `as_temp` and returns that
//!
//! As you can see, there is a cycle where `into` can (in theory) fallback to `as_temp`
//! which can fallback to `into`. So if one of the `ExprKind` variants is not, in fact,
//! Of those fallbacks, the most interesting one is `as_temp`, because
//! it discriminates based on the category of the expression. This is
//! basically the point where the "by value" operations are bridged
-//! over to the "by reference" mode (`as_lvalue`).
+//! over to the "by reference" mode (`as_place`).
mod as_constant;
-mod as_lvalue;
+mod as_place;
mod as_rvalue;
mod as_operand;
mod as_temp;
// dropped.
if this.hir.needs_drop(lhs.ty) {
let rhs = unpack!(block = this.as_local_operand(block, rhs));
- let lhs = unpack!(block = this.as_lvalue(block, lhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
unpack!(block = this.build_drop_and_replace(
block, lhs_span, lhs, rhs
));
block.unit()
} else {
let rhs = unpack!(block = this.as_local_rvalue(block, rhs));
- let lhs = unpack!(block = this.as_lvalue(block, lhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
this.cfg.push_assign(block, source_info, &lhs, rhs);
block.unit()
}
// As above, RTL.
let rhs = unpack!(block = this.as_local_operand(block, rhs));
- let lhs = unpack!(block = this.as_lvalue(block, lhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
// we don't have to drop prior contents or anything
// because AssignOp is only legal for Copy types
ExprKind::Return { value } => {
block = match value {
Some(value) => {
- unpack!(this.into(&Place::Local(RETURN_POINTER), block, value))
+ unpack!(this.into(&Place::Local(RETURN_PLACE), block, value))
}
None => {
this.cfg.push_assign_unit(block,
source_info,
- &Place::Local(RETURN_POINTER));
+ &Place::Local(RETURN_PLACE));
block
}
};
}
ExprKind::InlineAsm { asm, outputs, inputs } => {
let outputs = outputs.into_iter().map(|output| {
- unpack!(block = this.as_lvalue(block, output))
+ unpack!(block = this.as_place(block, output))
}).collect();
let inputs = inputs.into_iter().map(|input| {
unpack!(block = this.as_local_operand(block, input))
discriminant: ExprRef<'tcx>,
arms: Vec<Arm<'tcx>>)
-> BlockAnd<()> {
- let discriminant_lvalue = unpack!(block = self.as_lvalue(block, discriminant));
+ let discriminant_place = unpack!(block = self.as_place(block, discriminant));
let mut arm_blocks = ArmBlocks {
blocks: arms.iter()
(pre_binding_block, next_candidate_pre_binding_block))| {
Candidate {
span: pattern.span,
- match_pairs: vec![MatchPair::new(discriminant_lvalue.clone(), pattern)],
+ match_pairs: vec![MatchPair::new(discriminant_place.clone(), pattern)],
bindings: vec![],
guard,
arm_index,
self.cfg.terminate(*pre_binding_blocks.last().unwrap(),
outer_source_info, TerminatorKind::Unreachable);
- // this will generate code to test discriminant_lvalue and
+ // this will generate code to test discriminant_place and
// branch to the appropriate arm block
let otherwise = self.match_candidates(span, &mut arm_blocks, candidates, block);
PatternKind::Binding { mode: BindingMode::ByValue,
var,
subpattern: None, .. } => {
- let lvalue = self.storage_live_binding(block, var, irrefutable_pat.span);
- unpack!(block = self.into(&lvalue, block, initializer));
+ let place = self.storage_live_binding(block, var, irrefutable_pat.span);
+ unpack!(block = self.into(&place, block, initializer));
self.schedule_drop_for_binding(var, irrefutable_pat.span);
block.unit()
}
_ => {
- let lvalue = unpack!(block = self.as_lvalue(block, initializer));
- self.lvalue_into_pattern(block, irrefutable_pat, &lvalue)
+ let place = unpack!(block = self.as_place(block, initializer));
+ self.place_into_pattern(block, irrefutable_pat, &place)
}
}
}
- pub fn lvalue_into_pattern(&mut self,
+ pub fn place_into_pattern(&mut self,
mut block: BasicBlock,
irrefutable_pat: Pattern<'tcx>,
initializer: &Place<'tcx>)
#[derive(Clone, Debug)]
pub struct MatchPair<'pat, 'tcx:'pat> {
- // this lvalue...
- lvalue: Place<'tcx>,
+ // this place...
+ place: Place<'tcx>,
// ... must match this pattern.
pattern: &'pat Pattern<'tcx>,
match test.kind {
TestKind::SwitchInt { switch_ty, ref mut options, ref mut indices } => {
for candidate in candidates.iter() {
- if !self.add_cases_to_switch(&match_pair.lvalue,
+ if !self.add_cases_to_switch(&match_pair.place,
candidate,
switch_ty,
options,
}
TestKind::Switch { adt_def: _, ref mut variants} => {
for candidate in candidates.iter() {
- if !self.add_variants_to_switch(&match_pair.lvalue,
+ if !self.add_variants_to_switch(&match_pair.place,
candidate,
variants) {
break;
// vector of candidates. Those are the candidates that still
// apply if the test has that particular outcome.
debug!("match_candidates: test={:?} match_pair={:?}", test, match_pair);
- let target_blocks = self.perform_test(block, &match_pair.lvalue, &test);
+ let target_blocks = self.perform_test(block, &match_pair.place, &test);
let mut target_candidates: Vec<_> = (0..target_blocks.len()).map(|_| vec![]).collect();
// Sort the candidates into the appropriate vector in
// that point, we stop sorting.
let tested_candidates =
candidates.iter()
- .take_while(|c| self.sort_candidate(&match_pair.lvalue,
+ .take_while(|c| self.sort_candidate(&match_pair.place,
&test,
c,
&mut target_candidates))
//! Simplifying Candidates
//!
-//! *Simplifying* a match pair `lvalue @ pattern` means breaking it down
+//! *Simplifying* a match pair `place @ pattern` means breaking it down
//! into bindings or other, simpler match pairs. For example:
//!
-//! - `lvalue @ (P1, P2)` can be simplified to `[lvalue.0 @ P1, lvalue.1 @ P2]`
-//! - `lvalue @ x` can be simplified to `[]` by binding `x` to `lvalue`
+//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]`
+//! - `place @ x` can be simplified to `[]` by binding `x` to `place`
//!
//! The `simplify_candidate` routine just repeatedly applies these
//! sort of simplifications until there is nothing left to
name,
mutability,
span: match_pair.pattern.span,
- source: match_pair.lvalue.clone(),
+ source: match_pair.place.clone(),
var_id: var,
var_ty: ty,
binding_mode: mode,
if let Some(subpattern) = subpattern.as_ref() {
// this is the `x @ P` case; have to keep matching against `P` now
- candidate.match_pairs.push(MatchPair::new(match_pair.lvalue, subpattern));
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
}
Ok(())
}
});
if irrefutable {
- let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
- candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
+ let place = match_pair.place.downcast(adt_def, variant_index);
+ candidate.match_pairs.extend(self.field_match_pairs(place, subpatterns));
Ok(())
} else {
Err(match_pair)
PatternKind::Array { ref prefix, ref slice, ref suffix } => {
self.prefix_slice_suffix(&mut candidate.match_pairs,
- &match_pair.lvalue,
+ &match_pair.place,
prefix,
slice.as_ref(),
suffix);
PatternKind::Leaf { ref subpatterns } => {
// tuple struct, match subpats (if any)
candidate.match_pairs
- .extend(self.field_match_pairs(match_pair.lvalue, subpatterns));
+ .extend(self.field_match_pairs(match_pair.place, subpatterns));
Ok(())
}
PatternKind::Deref { ref subpattern } => {
- let lvalue = match_pair.lvalue.deref();
- candidate.match_pairs.push(MatchPair::new(lvalue, subpattern));
+ let place = match_pair.place.deref();
+ candidate.match_pairs.push(MatchPair::new(place, subpattern));
Ok(())
}
}
}
pub fn add_cases_to_switch<'pat>(&mut self,
- test_lvalue: &Place<'tcx>,
+ test_place: &Place<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
switch_ty: Ty<'tcx>,
options: &mut Vec<&'tcx ty::Const<'tcx>>,
indices: &mut FxHashMap<&'tcx ty::Const<'tcx>, usize>)
-> bool
{
- let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) {
+ let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) {
Some(match_pair) => match_pair,
_ => { return false; }
};
match *match_pair.pattern.kind {
PatternKind::Constant { value } => {
- // if the lvalues match, the type should match
+ // if the places match, the type should match
assert_eq!(match_pair.pattern.ty, switch_ty);
indices.entry(value)
}
pub fn add_variants_to_switch<'pat>(&mut self,
- test_lvalue: &Place<'tcx>,
+ test_place: &Place<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
variants: &mut BitVector)
-> bool
{
- let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) {
+ let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) {
Some(match_pair) => match_pair,
_ => { return false; }
};
/// Generates the code to perform a test.
pub fn perform_test(&mut self,
block: BasicBlock,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
test: &Test<'tcx>)
-> Vec<BasicBlock> {
let source_info = self.source_info(test.span);
let discr_ty = adt_def.repr.discr_type().to_ty(tcx);
let discr = self.temp(discr_ty, test.span);
self.cfg.push_assign(block, source_info, &discr,
- Rvalue::Discriminant(lvalue.clone()));
+ Rvalue::Discriminant(place.clone()));
assert_eq!(values.len() + 1, targets.len());
self.cfg.terminate(block, source_info, TerminatorKind::SwitchInt {
discr: Operand::Move(discr),
ConstVal::Bool(false) => vec![false_bb, true_bb],
v => span_bug!(test.span, "expected boolean value but got {:?}", v)
};
- (ret, TerminatorKind::if_(self.hir.tcx(), Operand::Copy(lvalue.clone()),
+ (ret, TerminatorKind::if_(self.hir.tcx(), Operand::Copy(place.clone()),
true_bb, false_bb))
} else {
// The switch may be inexhaustive so we
v.val.to_const_int().expect("switching on integral")
).collect();
(targets.clone(), TerminatorKind::SwitchInt {
- discr: Operand::Copy(lvalue.clone()),
+ discr: Operand::Copy(place.clone()),
switch_ty,
values: From::from(values),
targets,
}
TestKind::Eq { value, mut ty } => {
- let mut val = Operand::Copy(lvalue.clone());
+ let mut val = Operand::Copy(place.clone());
// If we're using b"..." as a pattern, we need to insert an
// unsizing coercion, as the byte string has the type &[u8; N].
let expect = if let ConstVal::ByteStr(bytes) = value.val {
let tcx = self.hir.tcx();
- // Unsize the lvalue to &[u8], too, if necessary.
+ // Unsize the place to &[u8], too, if necessary.
if let ty::TyRef(region, mt) = ty.sty {
if let ty::TyArray(_, _) = mt.ty.sty {
ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
// Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
let lo = self.literal_operand(test.span, ty.clone(), lo.clone());
let hi = self.literal_operand(test.span, ty.clone(), hi.clone());
- let val = Operand::Copy(lvalue.clone());
+ let val = Operand::Copy(place.clone());
let fail = self.cfg.start_new_block();
let block = self.compare(block, fail, test.span, BinOp::Le, lo, val.clone());
let (actual, result) = (self.temp(usize_ty, test.span),
self.temp(bool_ty, test.span));
- // actual = len(lvalue)
+ // actual = len(place)
self.cfg.push_assign(block, source_info,
- &actual, Rvalue::Len(lvalue.clone()));
+ &actual, Rvalue::Len(place.clone()));
// expected = <N>
let expected = self.push_usize(block, source_info, len);
target_block
}
- /// Given that we are performing `test` against `test_lvalue`,
+ /// Given that we are performing `test` against `test_place`,
/// this job sorts out what the status of `candidate` will be
/// after the test. The `resulting_candidates` vector stores, for
/// each possible outcome of `test`, a vector of the candidates
/// not apply to this candidate, but it might be we can get
/// tighter match code if we do something a bit different.
pub fn sort_candidate<'pat>(&mut self,
- test_lvalue: &Place<'tcx>,
+ test_place: &Place<'tcx>,
test: &Test<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
resulting_candidates: &mut [Vec<Candidate<'pat, 'tcx>>])
-> bool {
- // Find the match_pair for this lvalue (if any). At present,
+ // Find the match_pair for this place (if any). At present,
// afaik, there can be at most one. (In the future, if we
// adopted a more general `@` operator, there might be more
// than one, but it'd be very unusual to have two sides that
// away.)
let tested_match_pair = candidate.match_pairs.iter()
.enumerate()
- .filter(|&(_, mp)| mp.lvalue == *test_lvalue)
+ .filter(|&(_, mp)| mp.place == *test_place)
.next();
let (match_pair_index, match_pair) = match tested_match_pair {
Some(pair) => pair,
None => {
- // We are not testing this lvalue. Therefore, this
+ // We are not testing this place. Therefore, this
// candidate applies to ALL outcomes.
return false;
}
self.candidate_without_match_pair(match_pair_index, candidate);
self.prefix_slice_suffix(
&mut new_candidate.match_pairs,
- &candidate.match_pairs[match_pair_index].lvalue,
+ &candidate.match_pairs[match_pair_index].place,
prefix,
opt_slice,
suffix);
// we want to create a set of derived match-patterns like
// `(x as Variant).0 @ P1` and `(x as Variant).1 @ P1`.
let elem = ProjectionElem::Downcast(adt_def, variant_index);
- let downcast_lvalue = match_pair.lvalue.clone().elem(elem); // `(x as Variant)`
+ let downcast_place = match_pair.place.clone().elem(elem); // `(x as Variant)`
let consequent_match_pairs =
subpatterns.iter()
.map(|subpattern| {
// e.g., `(x as Variant).0`
- let lvalue = downcast_lvalue.clone().field(subpattern.field,
+ let place = downcast_place.clone().field(subpattern.field,
subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1`
- MatchPair::new(lvalue, &subpattern.pattern)
+ MatchPair::new(place, &subpattern.pattern)
});
// In addition, we need all the other match pairs from the old candidate.
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
pub fn field_match_pairs<'pat>(&mut self,
- lvalue: Place<'tcx>,
+ place: Place<'tcx>,
subpatterns: &'pat [FieldPattern<'tcx>])
-> Vec<MatchPair<'pat, 'tcx>> {
subpatterns.iter()
.map(|fieldpat| {
- let lvalue = lvalue.clone().field(fieldpat.field,
+ let place = place.clone().field(fieldpat.field,
fieldpat.pattern.ty);
- MatchPair::new(lvalue, &fieldpat.pattern)
+ MatchPair::new(place, &fieldpat.pattern)
})
.collect()
}
pub fn prefix_slice_suffix<'pat>(&mut self,
match_pairs: &mut Vec<MatchPair<'pat, 'tcx>>,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
prefix: &'pat [Pattern<'tcx>],
opt_slice: Option<&'pat Pattern<'tcx>>,
suffix: &'pat [Pattern<'tcx>]) {
min_length,
from_end: false,
};
- let lvalue = lvalue.clone().elem(elem);
- MatchPair::new(lvalue, subpattern)
+ let place = place.clone().elem(elem);
+ MatchPair::new(place, subpattern)
})
);
if let Some(subslice_pat) = opt_slice {
- let subslice = lvalue.clone().elem(ProjectionElem::Subslice {
+ let subslice = place.clone().elem(ProjectionElem::Subslice {
from: prefix.len() as u32,
to: suffix.len() as u32
});
min_length,
from_end: true,
};
- let lvalue = lvalue.clone().elem(elem);
- MatchPair::new(lvalue, subpattern)
+ let place = place.clone().elem(elem);
+ MatchPair::new(place, subpattern)
})
);
}
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
- pub fn new(lvalue: Place<'tcx>, pattern: &'pat Pattern<'tcx>) -> MatchPair<'pat, 'tcx> {
+ pub fn new(place: Place<'tcx>, pattern: &'pat Pattern<'tcx>) -> MatchPair<'pat, 'tcx> {
MatchPair {
- lvalue,
+ place,
pattern,
slice_len_checked: false,
}
/// call `schedule_drop` once the temporary is initialized.
pub fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> {
let temp = self.local_decls.push(LocalDecl::new_temp(ty, span));
- let lvalue = Place::Local(temp);
+ let place = Place::Local(temp);
debug!("temp: created temp {:?} with type {:?}",
- lvalue, self.local_decls[temp].ty);
- lvalue
+ place, self.local_decls[temp].ty);
+ place
}
pub fn literal_operand(&mut self,
temp
}
- pub fn consume_by_copy_or_move(&self, lvalue: Place<'tcx>) -> Operand<'tcx> {
+ pub fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> {
let tcx = self.hir.tcx();
- let ty = lvalue.ty(&self.local_decls, tcx).to_ty(tcx);
+ let ty = place.ty(&self.local_decls, tcx).to_ty(tcx);
if self.hir.type_moves_by_default(ty, DUMMY_SP) {
- Operand::Move(lvalue)
+ Operand::Move(place)
} else {
- Operand::Copy(lvalue)
+ Operand::Copy(place)
}
}
}
let mut block = START_BLOCK;
let expr = builder.hir.mirror(ast_expr);
- unpack!(block = builder.into_expr(&Place::Local(RETURN_POINTER), block, expr));
+ unpack!(block = builder.into_expr(&Place::Local(RETURN_PLACE), block, expr));
let source_info = builder.source_info(span);
builder.cfg.terminate(block, source_info, TerminatorKind::Return);
push_unsafe_count: 0,
unpushed_unsafe: safety,
breakable_scopes: vec![],
- local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty,
+ local_decls: IndexVec::from_elem_n(LocalDecl::new_return_place(return_ty,
span), 1),
var_indices: NodeMap(),
unit_temp: None,
let mut scope = None;
// Bind the argument patterns
for (index, &(ty, pattern)) in arguments.iter().enumerate() {
- // Function arguments always get the first Local indices after the return pointer
+ // Function arguments always get the first Local indices after the return place
let local = Local::new(index + 1);
- let lvalue = Place::Local(local);
+ let place = Place::Local(local);
if let Some(pattern) = pattern {
let pattern = self.hir.pattern_from_hir(pattern);
_ => {
scope = self.declare_bindings(scope, ast_body.span,
LintLevel::Inherited, &pattern);
- unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
+ unpack!(block = self.place_into_pattern(block, pattern, &place));
}
}
}
// Make sure we drop (parts of) the argument even when not matched on.
self.schedule_drop(pattern.as_ref().map_or(ast_body.span, |pat| pat.span),
- argument_scope, &lvalue, ty);
+ argument_scope, &place, ty);
}
}
let body = self.hir.mirror(ast_body);
- self.into(&Place::Local(RETURN_POINTER), block, body)
+ self.into(&Place::Local(RETURN_PLACE), block, body)
}
fn get_unit_temp(&mut self) -> Place<'tcx> {
### Drops
The primary purpose for scopes is to insert drops: while translating
-the contents, we also accumulate lvalues that need to be dropped upon
+the contents, we also accumulate places that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
-those lvalues onto the outgoing edge. Note that we don't know the full
+those places onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:
/// * freeing up stack space has no effect during unwinding
needs_cleanup: bool,
- /// set of lvalues to drop when exiting this scope. This starts
+ /// set of places to drop when exiting this scope. This starts
/// out empty but grows as variables are declared during the
/// building process. This is a stack, so we always drop from the
/// end of the vector (top of the stack) first.
#[derive(Debug)]
struct DropData<'tcx> {
- /// span where drop obligation was incurred (typically where lvalue was declared)
+ /// span where drop obligation was incurred (typically where place was declared)
span: Span,
- /// lvalue to drop
+ /// place to drop
location: Place<'tcx>,
/// Whether this is a full value Drop, or just a StorageDead.
// Scheduling drops
// ================
- /// Indicates that `lvalue` should be dropped on exit from
+ /// Indicates that `place` should be dropped on exit from
/// `region_scope`.
pub fn schedule_drop(&mut self,
span: Span,
region_scope: region::Scope,
- lvalue: &Place<'tcx>,
- lvalue_ty: Ty<'tcx>) {
- let needs_drop = self.hir.needs_drop(lvalue_ty);
+ place: &Place<'tcx>,
+ place_ty: Ty<'tcx>) {
+ let needs_drop = self.hir.needs_drop(place_ty);
let drop_kind = if needs_drop {
DropKind::Value { cached_block: CachedBlock::default() }
} else {
// Only temps and vars need their storage dead.
- match *lvalue {
+ match *place {
Place::Local(index) if index.index() > self.arg_count => DropKind::Storage,
_ => return
}
let scope_end = region_scope_span.with_lo(region_scope_span.hi());
scope.drops.push(DropData {
span: scope_end,
- location: lvalue.clone(),
+ location: place.clone(),
kind: drop_kind
});
return;
}
}
- span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, lvalue);
+ span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
}
// Other
{
let mut next_child = move_data.move_paths[path].first_child;
while let Some(child_index) = next_child {
- match move_data.move_paths[child_index].lvalue {
+ match move_data.move_paths[child_index].place {
mir::Place::Projection(ref proj) => {
if cond(proj) {
return Some(child_index)
/// is no need to maintain separate drop flags to track such state.
///
/// FIXME: we have to do something for moving slice patterns.
-fn lvalue_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+fn place_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &Mir<'tcx>,
- lv: &mir::Place<'tcx>) -> bool {
- let ty = lv.ty(mir, tcx).to_ty(tcx);
+ place: &mir::Place<'tcx>) -> bool {
+ let ty = place.ty(mir, tcx).to_ty(tcx);
match ty.sty {
ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
- debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} refd => true",
- lv, ty);
+ debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
+ place, ty);
true
}
ty::TyAdt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
- debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} Drop => true",
- lv, ty);
+ debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
+ place, ty);
true
}
_ => {
move_data: &MoveData<'tcx>,
path: MovePathIndex) -> bool
{
- lvalue_contents_drop_state_cannot_differ(
- tcx, mir, &move_data.move_paths[path].lvalue)
+ place_contents_drop_state_cannot_differ(
+ tcx, mir, &move_data.move_paths[path].place)
}
fn on_all_children_bits<'a, 'gcx, 'tcx, F>(
where F: FnMut(MovePathIndex)
{
on_all_children_bits(tcx, mir, &ctxt.move_data, path, |child| {
- let lvalue = &ctxt.move_data.move_paths[path].lvalue;
- let ty = lvalue.ty(mir, tcx).to_ty(tcx);
- debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, lvalue, ty);
+ let place = &ctxt.move_data.move_paths[path].place;
+ let ty = place.ty(mir, tcx).to_ty(tcx);
+ debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty);
let gcx = tcx.global_tcx();
let erased_ty = gcx.lift(&tcx.erase_regions(&ty)).unwrap();
{
let move_data = &ctxt.move_data;
for arg in mir.args_iter() {
- let lvalue = mir::Place::Local(arg);
- let lookup_result = move_data.rev_lookup.find(&lvalue);
+ let place = mir::Place::Local(arg);
+ let lookup_result = move_data.rev_lookup.find(&place);
on_lookup_result_bits(tcx, mir, move_data,
lookup_result,
|mpi| callback(mpi, DropFlagState::Present));
}
// temporarily allow some dead fields: `kind` and `region` will be
-// needed by borrowck; `lvalue` will probably be a MovePathIndex when
+// needed by borrowck; `place` will probably be a MovePathIndex when
// that is extended to include borrowed data paths.
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) location: Location,
pub(crate) kind: mir::BorrowKind,
pub(crate) region: Region<'tcx>,
- pub(crate) lvalue: mir::Place<'tcx>,
+ pub(crate) place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
};
let region = format!("{}", self.region);
let region = if region.len() > 0 { format!("{} ", region) } else { region };
- write!(w, "&{}{}{:?}", region, kind, self.lvalue)
+ write!(w, "&{}{}{:?}", region, kind, self.place)
}
}
fn visit_rvalue(&mut self,
rvalue: &mir::Rvalue<'tcx>,
location: mir::Location) {
- if let mir::Rvalue::Ref(region, kind, ref lvalue) = *rvalue {
- if is_unsafe_lvalue(self.tcx, self.mir, lvalue) { return; }
+ if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
+ if is_unsafe_place(self.tcx, self.mir, place) { return; }
let borrow = BorrowData {
- location: location, kind: kind, region: region, lvalue: lvalue.clone(),
+ location: location, kind: kind, region: region, place: place.clone(),
};
let idx = self.idx_vec.push(borrow);
self.location_map.insert(location, idx);
}
mir::StatementKind::Assign(_, ref rhs) => {
- if let mir::Rvalue::Ref(region, _, ref lvalue) = *rhs {
- if is_unsafe_lvalue(self.tcx, self.mir, lvalue) { return; }
+ if let mir::Rvalue::Ref(region, _, ref place) = *rhs {
+ if is_unsafe_place(self.tcx, self.mir, place) { return; }
let index = self.location_map.get(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
_in_out: &mut IdxSet<BorrowIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- _dest_lval: &mir::Place) {
+ _dest_place: &mir::Place) {
// there are no effects on the region scopes from method calls.
}
}
}
}
-fn is_unsafe_lvalue<'a, 'gcx: 'tcx, 'tcx: 'a>(
+fn is_unsafe_place<'a, 'gcx: 'tcx, 'tcx: 'a>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
- lvalue: &mir::Place<'tcx>
+ place: &mir::Place<'tcx>
) -> bool {
use self::mir::Place::*;
use self::mir::ProjectionElem;
- match *lvalue {
+ match *place {
Local(_) => false,
Static(ref static_) => tcx.is_static_mut(static_.def_id),
Projection(ref proj) => {
ProjectionElem::Subslice { .. } |
ProjectionElem::ConstantIndex { .. } |
ProjectionElem::Index(_) => {
- is_unsafe_lvalue(tcx, mir, &proj.base)
+ is_unsafe_place(tcx, mir, &proj.base)
}
ProjectionElem::Deref => {
let ty = proj.base.ty(mir, tcx).to_ty(tcx);
match ty.sty {
ty::TyRawPtr(..) => true,
- _ => is_unsafe_lvalue(tcx, mir, &proj.base),
+ _ => is_unsafe_place(tcx, mir, &proj.base),
}
}
}
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- dest_lval: &mir::Place) {
+ dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
- // the bits for that dest_lval to 1 (initialized).
+ // the bits for that dest_place to 1 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
- self.move_data().rev_lookup.find(dest_lval),
+ self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.add(&mpi); });
}
}
self.move_data().move_paths.len()
}
- // sets on_entry bits for Arg lvalues
+ // sets on_entry bits for Arg places
fn start_block_effect(&self, sets: &mut BlockSets<MovePathIndex>) {
// set all bits to 1 (uninit) before gathering counterevidence
for e in sets.on_entry.words_mut() { *e = !0; }
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- dest_lval: &mir::Place) {
+ dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
- // the bits for that dest_lval to 0 (initialized).
+ // the bits for that dest_place to 0 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
- self.move_data().rev_lookup.find(dest_lval),
+ self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.remove(&mpi); });
}
}
self.move_data().move_paths.len()
}
- // sets on_entry bits for Arg lvalues
+ // sets on_entry bits for Arg places
fn start_block_effect(&self, sets: &mut BlockSets<MovePathIndex>) {
for e in sets.on_entry.words_mut() { *e = 0; }
in_out: &mut IdxSet<MovePathIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- dest_lval: &mir::Place) {
+ dest_place: &mir::Place) {
// when a call returns successfully, that means we need to set
- // the bits for that dest_lval to 1 (initialized).
+ // the bits for that dest_place to 1 (initialized).
on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
- self.move_data().rev_lookup.find(dest_lval),
+ self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.add(&mpi); });
}
}
in_out: &mut IdxSet<MoveOutIndex>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- dest_lval: &mir::Place) {
+ dest_place: &mir::Place) {
let move_data = self.move_data();
let bits_per_block = self.bits_per_block();
on_lookup_result_bits(self.tcx,
self.mir,
move_data,
- move_data.rev_lookup.find(dest_lval),
+ move_data.rev_lookup.find(dest_place),
|mpi| for moi in &path_map[mpi] {
assert!(moi.index() < bits_per_block);
in_out.remove(&moi);
in_out: &mut IdxSet<InitIndex>,
call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- _dest_lval: &mir::Place) {
+ _dest_place: &mir::Place) {
let move_data = self.move_data();
let bits_per_block = self.bits_per_block();
let init_loc_map = &move_data.init_loc_map;
_in_out: &mut IdxSet<Local>,
_call_bb: mir::BasicBlock,
_dest_bb: mir::BasicBlock,
- _dest_lval: &mir::Place) {
+ _dest_place: &mir::Place) {
// Nothing to do when a call returns successfully
}
}
in_out: &mut IdxSet<Self::Idx>,
call_bb: mir::BasicBlock,
dest_bb: mir::BasicBlock,
- dest_lval: &mir::Place);
+ dest_place: &mir::Place);
}
impl<'a, 'gcx, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation
self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
}
}
- if let Some((ref dest_lval, ref dest_bb)) = *destination {
+ if let Some((ref dest_place, ref dest_bb)) = *destination {
// N.B.: This must be done *last*, after all other
// propagation, as documented in comment above.
self.flow_state.operator.propagate_call_return(
- in_out, bb, *dest_bb, dest_lval);
+ in_out, bb, *dest_bb, dest_place);
self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
}
}
path_map: &mut IndexVec<MovePathIndex, Vec<MoveOutIndex>>,
init_path_map: &mut IndexVec<MovePathIndex, Vec<InitIndex>>,
parent: Option<MovePathIndex>,
- lvalue: Place<'tcx>)
+ place: Place<'tcx>)
-> MovePathIndex
{
let move_path = move_paths.push(MovePath {
next_sibling: None,
first_child: None,
parent,
- lvalue,
+ place,
});
if let Some(parent) = parent {
}
impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
- /// This creates a MovePath for a given lvalue, returning an `MovePathError`
- /// if that lvalue can't be moved from.
+ /// This creates a MovePath for a given place, returning a `MoveError`
+ /// if that place can't be moved from.
///
- /// NOTE: lvalues behind references *do not* get a move path, which is
+ /// NOTE: places behind references *do not* get a move path, which is
/// problematic for borrowck.
///
/// Maybe we should have separate "borrowck" and "moveck" modes.
- fn move_path_for(&mut self, lval: &Place<'tcx>)
+ fn move_path_for(&mut self, place: &Place<'tcx>)
-> Result<MovePathIndex, MoveError<'tcx>>
{
- debug!("lookup({:?})", lval);
- match *lval {
+ debug!("lookup({:?})", place);
+ match *place {
Place::Local(local) => Ok(self.builder.data.rev_lookup.locals[local]),
Place::Static(..) => {
let span = self.builder.mir.source_info(self.loc).span;
Err(MoveError::cannot_move_out_of(span, Static))
}
Place::Projection(ref proj) => {
- self.move_path_for_projection(lval, proj)
+ self.move_path_for_projection(place, proj)
}
}
}
- fn create_move_path(&mut self, lval: &Place<'tcx>) {
+ fn create_move_path(&mut self, place: &Place<'tcx>) {
// This is an assignment, not a move, so this not being a valid
// move path is OK.
- let _ = self.move_path_for(lval);
+ let _ = self.move_path_for(place);
}
fn move_path_for_projection(&mut self,
- lval: &Place<'tcx>,
+ place: &Place<'tcx>,
proj: &PlaceProjection<'tcx>)
-> Result<MovePathIndex, MoveError<'tcx>>
{
let base = try!(self.move_path_for(&proj.base));
let mir = self.builder.mir;
let tcx = self.builder.tcx;
- let lv_ty = proj.base.ty(mir, tcx).to_ty(tcx);
- match lv_ty.sty {
+ let place_ty = proj.base.ty(mir, tcx).to_ty(tcx);
+ match place_ty.sty {
ty::TyRef(..) | ty::TyRawPtr(..) =>
return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span,
BorrowedContent)),
ty::TyAdt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
return Err(MoveError::cannot_move_out_of(mir.source_info(self.loc).span,
InteriorOfTypeWithDestructor {
- container_ty: lv_ty
+ container_ty: place_ty
})),
// move out of union - always move the entire union
ty::TyAdt(adt, _) if adt.is_union() =>
return Err(MoveError::cannot_move_out_of(
mir.source_info(self.loc).span,
InteriorOfSliceOrArray {
- ty: lv_ty, is_index: match proj.elem {
+ ty: place_ty, is_index: match proj.elem {
ProjectionElem::Index(..) => true,
_ => false
},
return Err(MoveError::cannot_move_out_of(
mir.source_info(self.loc).span,
InteriorOfSliceOrArray {
- ty: lv_ty, is_index: true
+ ty: place_ty, is_index: true
})),
_ => {
// FIXME: still badly broken
&mut self.builder.data.path_map,
&mut self.builder.data.init_path_map,
Some(base),
- lval.clone()
+ place.clone()
);
ent.insert(path);
Ok(path)
impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> {
fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
match stmt.kind {
- StatementKind::Assign(ref lval, ref rval) => {
- self.create_move_path(lval);
+ StatementKind::Assign(ref place, ref rval) => {
+ self.create_move_path(place);
if let RvalueInitializationState::Shallow = rval.initialization_state() {
// Box starts out uninitialized - need to create a separate
// move-path for the interior so it will be separate from
// the exterior.
- self.create_move_path(&lval.clone().deref());
- self.gather_init(lval, InitKind::Shallow);
+ self.create_move_path(&place.clone().deref());
+ self.gather_init(place, InitKind::Shallow);
} else {
- self.gather_init(lval, InitKind::Deep);
+ self.gather_init(place, InitKind::Deep);
}
self.gather_rvalue(rval);
}
Rvalue::NullaryOp(NullOp::Box, _) => {
// This returns an rvalue with uninitialized contents. We can't
// move out of it here because it is an rvalue - assignments always
- // completely initialize their lvalue.
+ // completely initialize their place.
//
// However, this does not matter - MIR building is careful to
// only emit a shallow free for the partially-initialized
TerminatorKind::Unreachable => { }
TerminatorKind::Return => {
- self.gather_move(&Place::Local(RETURN_POINTER));
+ self.gather_move(&Place::Local(RETURN_PLACE));
}
TerminatorKind::Assert { .. } |
match *operand {
Operand::Constant(..) |
Operand::Copy(..) => {} // not-a-move
- Operand::Move(ref lval) => { // a move
- self.gather_move(lval);
+ Operand::Move(ref place) => { // a move
+ self.gather_move(place);
}
}
}
- fn gather_move(&mut self, lval: &Place<'tcx>) {
- debug!("gather_move({:?}, {:?})", self.loc, lval);
+ fn gather_move(&mut self, place: &Place<'tcx>) {
+ debug!("gather_move({:?}, {:?})", self.loc, place);
- let path = match self.move_path_for(lval) {
+ let path = match self.move_path_for(place) {
Ok(path) | Err(MoveError::UnionMove { path }) => path,
Err(error @ MoveError::IllegalMove { .. }) => {
self.builder.errors.push(error);
let move_out = self.builder.data.moves.push(MoveOut { path: path, source: self.loc });
debug!("gather_move({:?}, {:?}): adding move {:?} of {:?}",
- self.loc, lval, move_out, path);
+ self.loc, place, move_out, path);
self.builder.data.path_map[path].push(move_out);
self.builder.data.loc_map[self.loc].push(move_out);
}
- fn gather_init(&mut self, lval: &Place<'tcx>, kind: InitKind) {
- debug!("gather_init({:?}, {:?})", self.loc, lval);
+ fn gather_init(&mut self, place: &Place<'tcx>, kind: InitKind) {
+ debug!("gather_init({:?}, {:?})", self.loc, place);
- if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(lval) {
+ if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) {
let init = self.builder.data.inits.push(Init {
span: self.builder.mir.source_info(self.loc).span,
path,
});
debug!("gather_init({:?}, {:?}): adding init {:?} of {:?}",
- self.loc, lval, init, path);
+ self.loc, place, init, path);
self.builder.data.init_path_map[path].push(init);
self.builder.data.init_loc_map[self.loc].push(init);
pub next_sibling: Option<MovePathIndex>,
pub first_child: Option<MovePathIndex>,
pub parent: Option<MovePathIndex>,
- pub lvalue: Place<'tcx>,
+ pub place: Place<'tcx>,
}
impl<'tcx> fmt::Debug for MovePath<'tcx> {
if let Some(next_sibling) = self.next_sibling {
write!(w, " next_sibling: {:?}", next_sibling)?;
}
- write!(w, " lvalue: {:?} }}", self.lvalue)
+ write!(w, " place: {:?} }}", self.place)
}
}
impl<'tcx> fmt::Display for MovePath<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
- write!(w, "{:?}", self.lvalue)
+ write!(w, "{:?}", self.place)
}
}
pub struct MovePathLookup<'tcx> {
locals: IndexVec<Local, MovePathIndex>,
- /// projections are made from a base-lvalue and a projection
- /// elem. The base-lvalue will have a unique MovePathIndex; we use
+ /// projections are made from a base-place and a projection
+ /// elem. The base-place will have a unique MovePathIndex; we use
/// the latter as the index into the outer vector (narrowing
/// subsequent search so that it is solely relative to that
- /// base-lvalue). For the remaining lookup, we map the projection
+ /// base-place). For the remaining lookup, we map the projection
/// elem to the associated MovePathIndex.
projections: FxHashMap<(MovePathIndex, AbstractElem<'tcx>), MovePathIndex>
}
// alternative will *not* create a MovePath on the fly for an
// unknown l-value, but will rather return the nearest available
// parent.
- pub fn find(&self, lval: &Place<'tcx>) -> LookupResult {
- match *lval {
+ pub fn find(&self, place: &Place<'tcx>) -> LookupResult {
+ match *place {
Place::Local(local) => LookupResult::Exact(self.locals[local]),
Place::Static(..) => LookupResult::Parent(None),
Place::Projection(ref proj) => {
},
};
- overloaded_lvalue(cx, hir_expr, adjustment.target, Some(call), vec![expr.to_ref()])
+ overloaded_place(cx, hir_expr, adjustment.target, Some(call), vec![expr.to_ref()])
}
Adjust::Borrow(AutoBorrow::Ref(r, m)) => {
ExprKind::Borrow {
hir::ExprIndex(ref lhs, ref index) => {
if cx.tables().is_method_call(expr) {
- overloaded_lvalue(cx, expr, expr_ty, None, vec![lhs.to_ref(), index.to_ref()])
+ overloaded_place(cx, expr, expr_ty, None, vec![lhs.to_ref(), index.to_ref()])
} else {
ExprKind::Index {
lhs: lhs.to_ref(),
hir::ExprUnary(hir::UnOp::UnDeref, ref arg) => {
if cx.tables().is_method_call(expr) {
- overloaded_lvalue(cx, expr, expr_ty, None, vec![arg.to_ref()])
+ overloaded_place(cx, expr, expr_ty, None, vec![arg.to_ref()])
} else {
ExprKind::Deref { arg: arg.to_ref() }
}
}
}
-fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
+fn overloaded_place<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
expr: &'tcx hir::Expr,
- lvalue_ty: Ty<'tcx>,
+ place_ty: Ty<'tcx>,
custom_callee: Option<(DefId, &'tcx Substs<'tcx>)>,
args: Vec<ExprRef<'tcx>>)
-> ExprKind<'tcx> {
// For an overloaded *x or x[y] expression of type T, the method
// call returns an &T and we must add the deref so that the types
- // line up (this is because `*x` and `x[y]` represent lvalues):
+ // line up (this is because `*x` and `x[y]` represent places):
let recv_ty = match args[0] {
ExprRef::Hair(e) => cx.tables().expr_ty_adjusted(e),
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
let (region, mt) = match recv_ty.sty {
ty::TyRef(region, mt) => (region, mt),
- _ => span_bug!(expr.span, "overloaded_lvalue: receiver is not a reference"),
+ _ => span_bug!(expr.span, "overloaded_place: receiver is not a reference"),
};
let ref_ty = cx.tcx.mk_ref(region, ty::TypeAndMut {
- ty: lvalue_ty,
+ ty: place_ty,
mutbl: mt.mutbl,
});
let rcvr = Place::Local(Local::new(1+0)).deref();
let ret_statement = self.make_statement(
StatementKind::Assign(
- Place::Local(RETURN_POINTER),
+ Place::Local(RETURN_PLACE),
Rvalue::Use(Operand::Copy(rcvr))
)
);
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
- fn make_lvalue(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
+ fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
Place::Local(
self.local_decls.push(temp_decl(mutability, ty, span))
},
});
- let ref_loc = self.make_lvalue(
+ let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty,
})
);
- let loc = self.make_lvalue(Mutability::Not, ty);
+ let loc = self.make_place(Mutability::Not, ty);
// `let ref_loc: &ty = &rcvr_field;`
let statement = self.make_statement(
) {
let tcx = self.tcx;
- let cond = self.make_lvalue(Mutability::Mut, tcx.types.bool);
+ let cond = self.make_place(Mutability::Mut, tcx.types.bool);
let compute_cond = self.make_statement(
StatementKind::Assign(
cond.clone(),
let rcvr = Place::Local(Local::new(1+0)).deref();
let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span));
- let end = self.make_lvalue(Mutability::Not, tcx.types.usize);
- let ret = self.make_lvalue(Mutability::Mut, tcx.mk_array(ty, len));
+ let end = self.make_place(Mutability::Not, tcx.types.usize);
+ let ret = self.make_place(Mutability::Mut, tcx.mk_array(ty, len));
// BB #0
// `let mut beg = 0;`
// `return ret;`
let ret_statement = self.make_statement(
StatementKind::Assign(
- Place::Local(RETURN_POINTER),
+ Place::Local(RETURN_PLACE),
Rvalue::Use(Operand::Move(ret.clone())),
)
);
// `return kind(returns[0], returns[1], ..., returns[tys.len() - 1]);`
let ret_statement = self.make_statement(
StatementKind::Assign(
- Place::Local(RETURN_POINTER),
+ Place::Local(RETURN_PLACE),
Rvalue::Aggregate(
box kind,
returns.into_iter().map(Operand::Move).collect()
if let Some(untuple_args) = untuple_args {
args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
- let arg_lv = Place::Local(Local::new(1+1));
- Operand::Move(arg_lv.field(Field::new(i), *ity))
+ let arg_place = Place::Local(Local::new(1+1));
+ Operand::Move(arg_place.field(Field::new(i), *ity))
}));
} else {
args.extend((1..sig.inputs().len()).map(|i| {
block(&mut blocks, statements, TerminatorKind::Call {
func: callee,
args,
- destination: Some((Place::Local(RETURN_POINTER),
+ destination: Some((Place::Local(RETURN_PLACE),
BasicBlock::new(1))),
cleanup: if let Adjustment::RefMut = rcvr_adjustment {
Some(BasicBlock::new(3))
statements: vec![Statement {
source_info,
kind: StatementKind::Assign(
- Place::Local(RETURN_POINTER),
+ Place::Local(RETURN_PLACE),
Rvalue::Aggregate(
box AggregateKind::Adt(adt_def, variant_no, substs, None),
(1..sig.inputs().len()+1).map(|i| {
pub struct AddValidation;
-/// Determine the "context" of the lval: Mutability and region.
-fn lval_context<'a, 'tcx, D>(
- lval: &Place<'tcx>,
+/// Determine the "context" of the place: Mutability and region.
+fn place_context<'a, 'tcx, D>(
+ place: &Place<'tcx>,
local_decls: &D,
tcx: TyCtxt<'a, 'tcx, 'tcx>
) -> (Option<region::Scope>, hir::Mutability)
{
use rustc::mir::Place::*;
- match *lval {
+ match *place {
Local { .. } => (None, hir::MutMutable),
Static(_) => (None, hir::MutImmutable),
Projection(ref proj) => {
// This is already as restricted as it gets, no need to even recurse
context
} else {
- let base_context = lval_context(&proj.base, local_decls, tcx);
+ let base_context = place_context(&proj.base, local_decls, tcx);
// The region of the outermost Deref is always most restrictive.
let re = context.0.or(base_context.0);
let mutbl = context.1.and(base_context.1);
}
}
- _ => lval_context(&proj.base, local_decls, tcx),
+ _ => place_context(&proj.base, local_decls, tcx),
}
}
}
let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src);
let local_decls = mir.local_decls.clone(); // FIXME: Find a way to get rid of this clone.
- // Convert an lvalue to a validation operand.
- let lval_to_operand = |lval: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> {
- let (re, mutbl) = lval_context(&lval, &local_decls, tcx);
- let ty = lval.ty(&local_decls, tcx).to_ty(tcx);
- ValidationOperand { lval, ty, re, mutbl }
+ // Convert a place to a validation operand.
+ let place_to_operand = |place: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> {
+ let (re, mutbl) = place_context(&place, &local_decls, tcx);
+ let ty = place.ty(&local_decls, tcx).to_ty(tcx);
+ ValidationOperand { place, ty, re, mutbl }
};
// Emit an Acquire at the beginning of the given block. If we are in restricted emission
};
// Gather all arguments, skip return value.
let operands = mir.local_decls.iter_enumerated().skip(1).take(mir.arg_count)
- .map(|(local, _)| lval_to_operand(Place::Local(local))).collect();
+ .map(|(local, _)| place_to_operand(Place::Local(local))).collect();
emit_acquire(&mut mir.basic_blocks_mut()[START_BLOCK], source_info, operands);
}
let release_stmt = Statement {
source_info,
kind: StatementKind::Validate(ValidationOp::Release,
- destination.iter().map(|dest| lval_to_operand(dest.0.clone()))
+ destination.iter().map(|dest| place_to_operand(dest.0.clone()))
.chain(
args.iter().filter_map(|op| {
match op {
- &Operand::Copy(ref lval) |
- &Operand::Move(ref lval) =>
- Some(lval_to_operand(lval.clone())),
+ &Operand::Copy(ref place) |
+ &Operand::Move(ref place) =>
+ Some(place_to_operand(place.clone())),
&Operand::Constant(..) => { None },
}
})
returns.push((source_info, destination.0.clone(), destination.1));
}
}
- Some(Terminator { kind: TerminatorKind::Drop { location: ref lval, .. },
+ Some(Terminator { kind: TerminatorKind::Drop { location: ref place, .. },
source_info }) |
- Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref lval, .. },
+ Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref place, .. },
source_info }) => {
// Before the call: Release all arguments
if !restricted_validation {
let release_stmt = Statement {
source_info,
kind: StatementKind::Validate(ValidationOp::Release,
- vec![lval_to_operand(lval.clone())]),
+ vec![place_to_operand(place.clone())]),
};
block_data.statements.push(release_stmt);
}
}
}
// Now we go over the returns we collected to acquire the return values.
- for (source_info, dest_lval, dest_block) in returns {
+ for (source_info, dest_place, dest_block) in returns {
emit_acquire(
&mut mir.basic_blocks_mut()[dest_block],
source_info,
- vec![lval_to_operand(dest_lval)]
+ vec![place_to_operand(dest_place)]
);
}
StatementKind::Assign(_, Rvalue::Ref(_, _, _)) => {
// Due to a lack of NLL; we can't capture anything directly here.
// Instead, we have to re-match and clone there.
- let (dest_lval, re, src_lval) = match block_data.statements[i].kind {
- StatementKind::Assign(ref dest_lval,
- Rvalue::Ref(re, _, ref src_lval)) => {
- (dest_lval.clone(), re, src_lval.clone())
+ let (dest_place, re, src_place) = match block_data.statements[i].kind {
+ StatementKind::Assign(ref dest_place,
+ Rvalue::Ref(re, _, ref src_place)) => {
+ (dest_place.clone(), re, src_place.clone())
},
_ => bug!("We already matched this."),
};
// So this is a ref, and we got all the data we wanted.
// Do an acquire of the result -- but only what it points to, so add a Deref
// projection.
- let dest_lval = Projection { base: dest_lval, elem: ProjectionElem::Deref };
- let dest_lval = Place::Projection(Box::new(dest_lval));
let acquire_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Acquire,
- vec![lval_to_operand(dest_lval)]),
+ vec![place_to_operand(dest_place.deref())]),
};
block_data.statements.insert(i+1, acquire_stmt);
};
let release_stmt = Statement {
source_info: block_data.statements[i].source_info,
- kind: StatementKind::Validate(op, vec![lval_to_operand(src_lval)]),
+ kind: StatementKind::Validate(op, vec![place_to_operand(src_place)]),
};
block_data.statements.insert(i, release_stmt);
}
{
// Due to a lack of NLL; we can't capture anything directly here.
// Instead, we have to re-match and clone there.
- let (dest_lval, src_lval) = match block_data.statements[i].kind {
- StatementKind::Assign(ref dest_lval,
- Rvalue::Cast(_, Operand::Copy(ref src_lval), _)) |
- StatementKind::Assign(ref dest_lval,
- Rvalue::Cast(_, Operand::Move(ref src_lval), _)) =>
+ let (dest_place, src_place) = match block_data.statements[i].kind {
+ StatementKind::Assign(ref dest_place,
+ Rvalue::Cast(_, Operand::Copy(ref src_place), _)) |
+ StatementKind::Assign(ref dest_place,
+ Rvalue::Cast(_, Operand::Move(ref src_place), _)) =>
{
- (dest_lval.clone(), src_lval.clone())
+ (dest_place.clone(), src_place.clone())
},
_ => bug!("We already matched this."),
};
let acquire_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Acquire,
- vec![lval_to_operand(dest_lval)]),
+ vec![place_to_operand(dest_place)]),
};
block_data.statements.insert(i+1, acquire_stmt);
let release_stmt = Statement {
source_info: block_data.statements[i].source_info,
kind: StatementKind::Validate(ValidationOp::Release,
- vec![lval_to_operand(src_lval)]),
+ vec![place_to_operand(src_place)]),
};
block_data.statements.insert(i, release_stmt);
}
self.super_rvalue(rvalue, location);
}
- fn visit_lvalue(&mut self,
- lvalue: &Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
if let PlaceContext::Borrow { .. } = context {
- if util::is_disaligned(self.tcx, self.mir, self.param_env, lvalue) {
+ if util::is_disaligned(self.tcx, self.mir, self.param_env, place) {
let source_info = self.source_info;
let lint_root =
self.visibility_scope_info[source_info.scope].lint_root;
}
}
- match lvalue {
+ match place {
&Place::Projection(box Projection {
ref base, ref elem
}) => {
_ => span_bug!(
self.source_info.span,
"non-field projection {:?} from union?",
- lvalue)
+ place)
};
if elem_ty.moves_by_default(self.tcx, self.param_env,
self.source_info.span) {
}
}
};
- self.super_lvalue(lvalue, context, location);
+ self.super_place(place, context, location);
}
}
dest_local);
continue;
}
- let dest_lvalue_def = dest_use_info.defs_not_including_drop().next().unwrap();
- location = dest_lvalue_def.location;
+ let dest_place_def = dest_use_info.defs_not_including_drop().next().unwrap();
+ location = dest_place_def.location;
let basic_block = &mir[location.block];
let statement_index = location.statement_index;
StatementKind::Assign(Place::Local(local), Rvalue::Use(ref operand)) if
local == dest_local => {
let maybe_action = match *operand {
- Operand::Copy(ref src_lvalue) |
- Operand::Move(ref src_lvalue) => {
- Action::local_copy(&mir, &def_use_analysis, src_lvalue)
+ Operand::Copy(ref src_place) |
+ Operand::Move(ref src_place) => {
+ Action::local_copy(&mir, &def_use_analysis, src_place)
}
Operand::Constant(ref src_constant) => {
Action::constant(src_constant)
}
impl<'tcx> Action<'tcx> {
- fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_lvalue: &Place<'tcx>)
+ fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_place: &Place<'tcx>)
-> Option<Action<'tcx>> {
// The source must be a local.
- let src_local = if let Place::Local(local) = *src_lvalue {
+ let src_local = if let Place::Local(local) = *src_place {
local
} else {
debug!(" Can't copy-propagate local: source is not a local");
debug!(" Replacing all uses of {:?} with {:?} (local)",
dest_local,
src_local);
- for lvalue_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
- if lvalue_use.context.is_storage_marker() {
- mir.make_statement_nop(lvalue_use.location)
+ for place_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
+ if place_use.context.is_storage_marker() {
+ mir.make_statement_nop(place_use.location)
}
}
- for lvalue_use in &def_use_analysis.local_info(src_local).defs_and_uses {
- if lvalue_use.context.is_storage_marker() {
- mir.make_statement_nop(lvalue_use.location)
+ for place_use in &def_use_analysis.local_info(src_local).defs_and_uses {
+ if place_use.context.is_storage_marker() {
+ mir.make_statement_nop(place_use.location)
}
}
dest_local,
src_constant);
let dest_local_info = def_use_analysis.local_info(dest_local);
- for lvalue_use in &dest_local_info.defs_and_uses {
- if lvalue_use.context.is_storage_marker() {
- mir.make_statement_nop(lvalue_use.location)
+ for place_use in &dest_local_info.defs_and_uses {
+ if place_use.context.is_storage_marker() {
+ mir.make_statement_nop(place_use.location)
}
}
// Replace all uses of the destination local with the constant.
let mut visitor = ConstantPropagationVisitor::new(dest_local,
src_constant);
- for dest_lvalue_use in &dest_local_info.defs_and_uses {
- visitor.visit_location(mir, dest_lvalue_use.location)
+ for dest_place_use in &dest_local_info.defs_and_uses {
+ visitor.visit_location(mir, dest_place_use.location)
}
// Zap the assignment instruction if we eliminated all the uses. We won't have been
// able to do that if the destination was used in a projection, because projections
- // must have lvalues on their LHS.
+ // must have places on their LHS.
let use_count = dest_local_info.use_count();
if visitor.uses_replaced == use_count {
debug!(" {} of {} use(s) replaced; deleting assignment",
if adt_def.is_enum() {
let set_discriminant = Statement {
kind: StatementKind::SetDiscriminant {
- lvalue: lhs.clone(),
+ place: lhs.clone(),
variant_index: variant,
},
source_info: src_info,
});
let path = self.move_data().rev_lookup.find(location);
- debug!("collect_drop_flags: {:?}, lv {:?} ({:?})",
+ debug!("collect_drop_flags: {:?}, place {:?} ({:?})",
bb, location, path);
let path = match path {
let (_maybe_live, maybe_dead) = init_data.state(parent);
if maybe_dead {
span_bug!(terminator.source_info.span,
- "drop of untracked, uninitialized value {:?}, lv {:?} ({:?})",
+ "drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
bb, location, path);
}
continue
/// The desugaring drops the location if needed, and then writes
/// the value (including setting the drop flag) over it in *both* arms.
///
- /// The `replace` terminator can also be called on lvalues that
+ /// The `replace` terminator can also be called on places that
/// are not tracked by elaboration (for example,
/// `replace x[i] <- tmp0`). The borrow checker requires that
/// these locations are initialized before the assignment,
fn drop_flags_for_fn_rets(&mut self) {
for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
if let TerminatorKind::Call {
- destination: Some((ref lv, tgt)), cleanup: Some(_), ..
+ destination: Some((ref place, tgt)), cleanup: Some(_), ..
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
let loc = Location { block: tgt, statement_index: 0 };
- let path = self.move_data().rev_lookup.find(lv);
+ let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
self.tcx, self.mir, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)
// so mark the return as initialized *before* the
// call.
if let TerminatorKind::Call {
- destination: Some((ref lv, _)), cleanup: None, ..
+ destination: Some((ref place, _)), cleanup: None, ..
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
let loc = Location { block: bb, statement_index: data.statements.len() };
- let path = self.move_data().rev_lookup.find(lv);
+ let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
self.tcx, self.mir, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)
assert_ne!(*local, self_arg());
}
- fn visit_lvalue(&mut self,
- lvalue: &mut Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &mut Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- if *lvalue == Place::Local(self_arg()) {
- *lvalue = Place::Projection(Box::new(Projection {
- base: lvalue.clone(),
+ if *place == Place::Local(self_arg()) {
+ *place = Place::Projection(Box::new(Projection {
+ base: place.clone(),
elem: ProjectionElem::Deref,
}));
} else {
- self.super_lvalue(lvalue, context, location);
+ self.super_place(place, context, location);
}
}
}
// A list of suspension points, generated during the transform
suspension_points: Vec<SuspensionPoint>,
- // The original RETURN_POINTER local
+ // The original RETURN_PLACE local
new_ret_local: Local,
}
assert_eq!(self.remap.get(local), None);
}
- fn visit_lvalue(&mut self,
- lvalue: &mut Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &mut Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- if let Place::Local(l) = *lvalue {
+ if let Place::Local(l) = *place {
// Replace an Local in the remap with a generator struct access
if let Some(&(ty, idx)) = self.remap.get(&l) {
- *lvalue = self.make_field(idx, ty);
+ *place = self.make_field(idx, ty);
}
} else {
- self.super_lvalue(lvalue, context, location);
+ self.super_place(place, context, location);
}
}
// We must assign the value first in case it gets declared dead below
data.statements.push(Statement {
source_info,
- kind: StatementKind::Assign(Place::Local(RETURN_POINTER),
+ kind: StatementKind::Assign(Place::Local(RETURN_PLACE),
self.make_state(state_idx, v)),
});
let state = if let Some(resume) = resume { // Yield
mir.local_decls.swap(0, new_ret_local.index());
RenameLocalVisitor {
- from: RETURN_POINTER,
+ from: RETURN_PLACE,
to: new_ret_local,
}.visit_mir(mir);
}
// Replace the return variable
- mir.local_decls[RETURN_POINTER] = LocalDecl {
+ mir.local_decls[RETURN_PLACE] = LocalDecl {
mutability: Mutability::Mut,
ty: tcx.mk_nil(),
name: None,
Kind::from(mir.return_ty())].iter());
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
- // We rename RETURN_POINTER which has type mir.return_ty to new_ret_local
- // RETURN_POINTER then is a fresh unused local with type ret_ty.
+ // We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
+ // RETURN_PLACE then is a fresh unused local with type ret_ty.
let new_ret_local = replace_result_variable(ret_ty, mir);
// Extract locals which are live across suspension point into `layout`
// `i : &mut usize`, then just duplicating the `a[*i]`
// Place could result in two different locations if `f`
// writes to `i`. To prevent this we need to create a temporary
- // borrow of the lvalue and pass the destination as `*temp` instead.
- fn dest_needs_borrow(lval: &Place) -> bool {
- match *lval {
+ // borrow of the place and pass the destination as `*temp` instead.
+ fn dest_needs_borrow(place: &Place) -> bool {
+ match *place {
Place::Projection(ref p) => {
match p.elem {
ProjectionElem::Deref |
// needs to generate the cast.
// FIXME: we should probably just generate correct MIR in the first place...
- let arg = if let Operand::Move(ref lval) = args[0] {
- lval.clone()
+ let arg = if let Operand::Move(ref place) = args[0] {
+ place.clone()
} else {
bug!("Constant arg to \"box_free\"");
};
local: &mut Local,
_ctxt: PlaceContext<'tcx>,
_location: Location) {
- if *local == RETURN_POINTER {
+ if *local == RETURN_PLACE {
match self.destination {
Place::Local(l) => {
*local = l;
return;
},
- ref lval => bug!("Return lvalue is {:?}, not local", lval)
+ ref place => bug!("Return place is {:?}, not local", place)
}
}
let idx = local.index() - 1;
*local = self.local_map[Local::new(idx - self.args.len())];
}
- fn visit_lvalue(&mut self,
- lvalue: &mut Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &mut Place<'tcx>,
_ctxt: PlaceContext<'tcx>,
_location: Location) {
- if let Place::Local(RETURN_POINTER) = *lvalue {
- // Return pointer; update the lvalue itself
- *lvalue = self.destination.clone();
+ if let Place::Local(RETURN_PLACE) = *place {
+ // Return place; update the place itself
+ *place = self.destination.clone();
} else {
- self.super_lvalue(lvalue, _ctxt, _location);
+ self.super_place(place, _ctxt, _location);
}
}
fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) {
if self.optimizations.and_stars.remove(&location) {
debug!("Replacing `&*`: {:?}", rvalue);
- let new_lvalue = match *rvalue {
+ let new_place = match *rvalue {
Rvalue::Ref(_, _, Place::Projection(ref mut projection)) => {
// Replace with dummy
mem::replace(&mut projection.base, Place::Local(Local::new(0)))
}
_ => bug!("Detected `&*` but didn't find `&*`!"),
};
- *rvalue = Rvalue::Use(Operand::Copy(new_lvalue))
+ *rvalue = Rvalue::Use(Operand::Copy(new_place))
}
if let Some(constant) = self.optimizations.arrays_lengths.remove(&location) {
}
}
- if let Rvalue::Len(ref lvalue) = *rvalue {
- let lvalue_ty = lvalue.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
- if let TypeVariants::TyArray(_, len) = lvalue_ty.sty {
+ if let Rvalue::Len(ref place) = *rvalue {
+ let place_ty = place.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
+ if let TypeVariants::TyArray(_, len) = place_ty.sty {
let span = self.mir.source_info(location).span;
let ty = self.tcx.types.usize;
let literal = Literal::Value { value: len };
};
let bin_statement = block.statements.pop().unwrap();
- let (source_info, lvalue, lhs, mut rhs) = match bin_statement {
+ let (source_info, place, lhs, mut rhs) = match bin_statement {
Statement {
source_info,
kind: StatementKind::Assign(
- lvalue,
+ place,
Rvalue::BinaryOp(_, lhs, rhs))
- } => (source_info, lvalue, lhs, rhs),
+ } => (source_info, place, lhs, rhs),
Statement {
source_info,
kind: StatementKind::Assign(
- lvalue,
+ place,
Rvalue::CheckedBinaryOp(_, lhs, rhs))
- } => (source_info, lvalue, lhs, rhs),
+ } => (source_info, place, lhs, rhs),
_ => bug!("Statement doesn't match pattern any more?"),
};
}
let call_did = check_lang_item_type(
- lang_item, &lvalue, &lhs, &rhs, local_decls, tcx);
+ lang_item, &place, &lhs, &rhs, local_decls, tcx);
let bb = BasicBlock::new(cur_len + new_blocks.len());
new_blocks.push(after_call);
func: Operand::function_handle(tcx, call_did,
Slice::empty(), source_info.span),
args: vec![lhs, rhs],
- destination: Some((lvalue, bb)),
+ destination: Some((place, bb)),
cleanup: None,
},
});
fn check_lang_item_type<'a, 'tcx, D>(
lang_item: LangItem,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
lhs: &Operand<'tcx>,
rhs: &Operand<'tcx>,
local_decls: &D,
let sig = tcx.no_late_bound_regions(&poly_sig).unwrap();
let lhs_ty = lhs.ty(local_decls, tcx);
let rhs_ty = rhs.ty(local_decls, tcx);
- let lvalue_ty = lvalue.ty(local_decls, tcx).to_ty(tcx);
- let expected = [lhs_ty, rhs_ty, lvalue_ty];
+ let place_ty = place.ty(local_decls, tcx).to_ty(tcx);
+ let expected = [lhs_ty, rhs_ty, place_ty];
assert_eq!(sig.inputs_and_output[..], expected,
"lang item {}", tcx.def_symbol_name(did));
did
&mut self,
location: Location,
borrow_region: ty::Region<'tcx>,
- borrowed_lv: &Place<'tcx>,
+ borrowed_place: &Place<'tcx>,
) {
- if let Projection(ref proj) = *borrowed_lv {
+ if let Projection(ref proj) = *borrowed_place {
let PlaceProjection { ref base, ref elem } = **proj;
if let ProjectionElem::Deref = *elem {
// where L is the path that is borrowed. In that case, we have
// to add the reborrow constraints (which don't fall out
// naturally from the type-checker).
- if let Rvalue::Ref(region, _bk, ref borrowed_lv) = *rvalue {
- self.add_reborrow_constraint(location, region, borrowed_lv);
+ if let Rvalue::Ref(region, _bk, ref borrowed_place) = *rvalue {
+ self.add_reborrow_constraint(location, region, borrowed_place);
}
self.super_rvalue(rvalue, location);
statement_index: usize::MAX
});
- self.assign(RETURN_POINTER, rvalue, span);
+ self.assign(RETURN_PLACE, rvalue, span);
self.source.promoted.push(self.promoted);
}
}
}
};
- // Declare return pointer local
- let initial_locals = iter::once(LocalDecl::new_return_pointer(ty, span))
+ // Declare return place local
+ let initial_locals = iter::once(LocalDecl::new_return_place(ty, span))
.collect();
let mut promoter = Promoter {
// Function argument.
const FN_ARGUMENT = 1 << 2;
- // Static lvalue or move from a static.
+ // Static place or move from a static.
const STATIC = 1 << 3;
// Reference to a static.
store(&mut self.temp_qualif[index])
}
Place::Local(index) if self.mir.local_kind(index) == LocalKind::ReturnPointer => {
- debug!("store to return pointer {:?}", index);
+ debug!("store to return place {:?}", index);
store(&mut self.return_qualif)
}
// This must be an explicit assignment.
_ => {
// Catch more errors in the destination.
- self.visit_lvalue(dest, PlaceContext::Store, location);
+ self.visit_place(dest, PlaceContext::Store, location);
self.statement_like();
}
}
}
}
- fn visit_lvalue(&mut self,
- lvalue: &Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- match *lvalue {
+ match *place {
Place::Local(ref local) => self.visit_local(local, context, location),
Place::Static(ref global) => {
self.add(Qualif::STATIC);
}
Place::Projection(ref proj) => {
self.nest(|this| {
- this.super_lvalue(lvalue, context, location);
+ this.super_place(place, context, location);
match proj.elem {
ProjectionElem::Deref => {
if !this.try_consume() {
"cannot refer to the interior of another \
static, use a constant instead");
}
- let ty = lvalue.ty(this.mir, this.tcx).to_ty(this.tcx);
+ let ty = place.ty(this.mir, this.tcx).to_ty(this.tcx);
this.qualif.restrict(ty, this.tcx, this.param_env);
}
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
match *operand {
- Operand::Copy(ref lvalue) |
- Operand::Move(ref lvalue) => {
+ Operand::Copy(ref place) |
+ Operand::Move(ref place) => {
self.nest(|this| {
this.super_operand(operand, location);
this.try_consume();
});
// Mark the consumed locals to indicate later drops are noops.
- if let Place::Local(local) = *lvalue {
+ if let Place::Local(local) = *place {
self.local_needs_drop[local] = None;
}
}
}
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
- // Recurse through operands and lvalues.
+ // Recurse through operands and places.
self.super_rvalue(rvalue, location);
match *rvalue {
Rvalue::Discriminant(..) => {}
Rvalue::Len(_) => {
- // Static lvalues in consts would have errored already,
+ // Static places in consts would have errored already,
// don't treat length checks as reads from statics.
self.qualif = self.qualif - Qualif::STATIC;
}
- Rvalue::Ref(_, kind, ref lvalue) => {
- // Static lvalues in consts would have errored already,
+ Rvalue::Ref(_, kind, ref place) => {
+ // Static places in consts would have errored already,
// only keep track of references to them here.
if self.qualif.intersects(Qualif::STATIC) {
self.qualif = self.qualif - Qualif::STATIC;
self.add(Qualif::STATIC_REF);
}
- let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
+ let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx);
if kind == BorrowKind::Mut {
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
let candidate = Candidate::Ref(location);
if !self.qualif.intersects(Qualif::NEVER_PROMOTE) {
// We can only promote direct borrows of temps.
- if let Place::Local(local) = *lvalue {
+ if let Place::Local(local) = *place {
if self.mir.local_kind(local) == LocalKind::Temp {
self.promotion_candidates.push(candidate);
}
}
self.assign(dest, location);
}
- } else if let TerminatorKind::Drop { location: ref lvalue, .. } = *kind {
+ } else if let TerminatorKind::Drop { location: ref place, .. } = *kind {
self.super_terminator_kind(bb, kind, location);
// Deny *any* live drops anywhere other than functions.
if self.mode != Mode::Fn {
// HACK(eddyb) Emulate a bit of dataflow analysis,
// conservatively, that drop elaboration will do.
- let needs_drop = if let Place::Local(local) = *lvalue {
+ let needs_drop = if let Place::Local(local) = *place {
self.local_needs_drop[local]
} else {
None
if let Some(span) = needs_drop {
// Double-check the type being dropped, to minimize false positives.
- let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx);
+ let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx);
if ty.needs_drop(self.tcx, self.param_env) {
struct_span_err!(self.tcx.sess, span, E0493,
"destructors cannot be evaluated at compile-time")
self.nest(|this| {
this.visit_source_info(&statement.source_info);
match statement.kind {
- StatementKind::Assign(ref lvalue, ref rvalue) => {
- this.visit_assign(bb, lvalue, rvalue, location);
+ StatementKind::Assign(ref place, ref rvalue) => {
+ this.visit_assign(bb, place, rvalue, location);
}
StatementKind::SetDiscriminant { .. } |
StatementKind::StorageLive(_) |
None => return,
};
assert!(args.len() == 1);
- let peek_arg_lval = match args[0] {
- mir::Operand::Copy(ref lval @ mir::Place::Local(_)) |
- mir::Operand::Move(ref lval @ mir::Place::Local(_)) => Some(lval),
+ let peek_arg_place = match args[0] {
+ mir::Operand::Copy(ref place @ mir::Place::Local(_)) |
+ mir::Operand::Move(ref place @ mir::Place::Local(_)) => Some(place),
_ => None,
};
- let peek_arg_lval = match peek_arg_lval {
+ let peek_arg_place = match peek_arg_place {
Some(arg) => arg,
None => {
tcx.sess.diagnostic().span_err(
let mut kill = results.0.sets.kill_set_for(bb.index()).to_owned();
// Emulate effect of all statements in the block up to (but not
- // including) the borrow within `peek_arg_lval`. Do *not* include
- // call to `peek_arg_lval` itself (since we are peeking the state
+ // including) the borrow within `peek_arg_place`. Do *not* include
+ // call to `peek_arg_place` itself (since we are peeking the state
// of the argument at time immediate preceding Call to
// `rustc_peek`).
for (j, stmt) in statements.iter().enumerate() {
debug!("rustc_peek: ({:?},{}) {:?}", bb, j, stmt);
- let (lvalue, rvalue) = match stmt.kind {
- mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
- (lvalue, rvalue)
+ let (place, rvalue) = match stmt.kind {
+ mir::StatementKind::Assign(ref place, ref rvalue) => {
+ (place, rvalue)
}
mir::StatementKind::StorageLive(_) |
mir::StatementKind::StorageDead(_) |
"sanity_check should run before Deaggregator inserts SetDiscriminant"),
};
- if lvalue == peek_arg_lval {
- if let mir::Rvalue::Ref(_, mir::BorrowKind::Shared, ref peeking_at_lval) = *rvalue {
+ if place == peek_arg_place {
+ if let mir::Rvalue::Ref(_, mir::BorrowKind::Shared, ref peeking_at_place) = *rvalue {
// Okay, our search is over.
- match move_data.rev_lookup.find(peeking_at_lval) {
+ match move_data.rev_lookup.find(peeking_at_place) {
LookupResult::Exact(peek_mpi) => {
let bit_state = sets.on_entry.contains(&peek_mpi);
debug!("rustc_peek({:?} = &{:?}) bit_state: {}",
- lvalue, peeking_at_lval, bit_state);
+ place, peeking_at_place, bit_state);
if !bit_state {
tcx.sess.span_err(span, "rustc_peek: bit not set");
}
}
}
- let lhs_mpi = move_data.rev_lookup.find(lvalue);
+ let lhs_mpi = move_data.rev_lookup.find(place);
- debug!("rustc_peek: computing effect on lvalue: {:?} ({:?}) in stmt: {:?}",
- lvalue, lhs_mpi, stmt);
+ debug!("rustc_peek: computing effect on place: {:?} ({:?}) in stmt: {:?}",
+ place, lhs_mpi, stmt);
// reset GEN and KILL sets before emulating their effect.
for e in sets.gen_set.words_mut() { *e = 0; }
for e in sets.kill_set.words_mut() { *e = 0; }
}
}
- fn visit_lvalue(
+ fn visit_place(
&mut self,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
context: PlaceContext,
location: Location,
) {
- self.sanitize_lvalue(lvalue, location, context);
+ self.sanitize_place(place, location, context);
}
fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
}
}
- fn sanitize_lvalue(&mut self,
- lvalue: &Place<'tcx>,
+ fn sanitize_place(&mut self,
+ place: &Place<'tcx>,
location: Location,
context: PlaceContext)
-> PlaceTy<'tcx> {
- debug!("sanitize_lvalue: {:?}", lvalue);
- let lvalue_ty = match *lvalue {
+ debug!("sanitize_place: {:?}", place);
+ let place_ty = match *place {
Place::Local(index) => PlaceTy::Ty {
ty: self.mir.local_decls[index].ty,
},
Place::Static(box Static { def_id, ty: sty }) => {
- let sty = self.sanitize_type(lvalue, sty);
+ let sty = self.sanitize_type(place, sty);
let ty = self.tcx().type_of(def_id);
let ty = self.cx.normalize(&ty, location);
if let Err(terr) = self.cx
{
span_mirbug!(
self,
- lvalue,
+ place,
"bad static type ({:?}: {:?}): {:?}",
ty,
sty,
} else {
PlaceContext::Projection(Mutability::Not)
};
- let base_ty = self.sanitize_lvalue(&proj.base, location, base_context);
+ let base_ty = self.sanitize_place(&proj.base, location, base_context);
if let PlaceTy::Ty { ty } = base_ty {
if ty.references_error() {
assert!(self.errors_reported);
};
}
}
- self.sanitize_projection(base_ty, &proj.elem, lvalue, location)
+ self.sanitize_projection(base_ty, &proj.elem, place, location)
}
};
if let PlaceContext::Copy = context {
- let ty = lvalue_ty.to_ty(self.tcx());
+ let ty = place_ty.to_ty(self.tcx());
if self.cx.infcx.type_moves_by_default(self.cx.param_env, ty, DUMMY_SP) {
- span_mirbug!(self, lvalue,
+ span_mirbug!(self, place,
"attempted copy of non-Copy type ({:?})", ty);
}
}
- lvalue_ty
+ place_ty
}
fn sanitize_projection(
&mut self,
base: PlaceTy<'tcx>,
pi: &PlaceElem<'tcx>,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
location: Location,
) -> PlaceTy<'tcx> {
- debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue);
+ debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place);
let tcx = self.tcx();
let base_ty = base.to_ty(tcx);
let span = self.last_span;
let deref_ty = base_ty.builtin_deref(true, ty::LvaluePreference::NoPreference);
PlaceTy::Ty {
ty: deref_ty.map(|t| t.ty).unwrap_or_else(|| {
- span_mirbug_and_err!(self, lvalue, "deref of non-pointer {:?}", base_ty)
+ span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty)
}),
}
}
} else {
PlaceTy::Ty {
ty: base_ty.builtin_index().unwrap_or_else(|| {
- span_mirbug_and_err!(self, lvalue, "index of non-array {:?}", base_ty)
+ span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
}),
}
}
// consider verifying in-bounds
PlaceTy::Ty {
ty: base_ty.builtin_index().unwrap_or_else(|| {
- span_mirbug_and_err!(self, lvalue, "index of non-array {:?}", base_ty)
+ span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
}),
}
}
} else {
span_mirbug_and_err!(
self,
- lvalue,
+ place,
"taking too-small slice of {:?}",
base_ty
)
}
}
ty::TySlice(..) => base_ty,
- _ => span_mirbug_and_err!(self, lvalue, "slice of non-array {:?}", base_ty),
+ _ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
},
},
ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty {
PlaceTy::Ty {
ty: span_mirbug_and_err!(
self,
- lvalue,
+ place,
"cast to variant #{:?} but enum only has {:?}",
index,
adt_def.variants.len()
_ => PlaceTy::Ty {
ty: span_mirbug_and_err!(
self,
- lvalue,
+ place,
"can't downcast {:?} as {:?}",
base_ty,
adt_def1
},
},
ProjectionElem::Field(field, fty) => {
- let fty = self.sanitize_type(lvalue, fty);
- match self.field_ty(lvalue, base, field, location) {
+ let fty = self.sanitize_type(place, fty);
+ match self.field_ty(place, base, field, location) {
Ok(ty) => {
if let Err(terr) = self.cx.eq_types(span, ty, fty, location.at_self()) {
span_mirbug!(
self,
- lvalue,
+ place,
"bad field access ({:?}: {:?}): {:?}",
ty,
fty,
}
Err(FieldAccessError::OutOfRange { field_count }) => span_mirbug!(
self,
- lvalue,
+ place,
"accessed field #{} but variant only has {}",
field.index(),
field_count
debug!("check_stmt: {:?}", stmt);
let tcx = self.tcx();
match stmt.kind {
- StatementKind::Assign(ref lv, ref rv) => {
- let lv_ty = lv.ty(mir, tcx).to_ty(tcx);
+ StatementKind::Assign(ref place, ref rv) => {
+ let place_ty = place.ty(mir, tcx).to_ty(tcx);
let rv_ty = rv.ty(mir, tcx);
if let Err(terr) =
- self.sub_types(rv_ty, lv_ty, location.at_successor_within_block())
+ self.sub_types(rv_ty, place_ty, location.at_successor_within_block())
{
span_mirbug!(
self,
stmt,
"bad assignment ({:?} = {:?}): {:?}",
- lv_ty,
+ place_ty,
rv_ty,
terr
);
self.check_rvalue(mir, rv, location);
}
StatementKind::SetDiscriminant {
- ref lvalue,
+ ref place,
variant_index,
} => {
- let lvalue_type = lvalue.ty(mir, tcx).to_ty(tcx);
- let adt = match lvalue_type.sty {
+ let place_type = place.ty(mir, tcx).to_ty(tcx);
+ let adt = match place_type.sty {
TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt,
_ => {
span_bug!(
stmt.source_info.span,
"bad set discriminant ({:?} = {:?}): lhs is not an enum",
- lvalue,
+ place,
variant_index
);
}
span_bug!(
stmt.source_info.span,
"bad set discriminant ({:?} = {:?}): value of of range",
- lvalue,
+ place,
variant_index
);
};
target,
unwind,
} => {
- let lv_ty = location.ty(mir, tcx).to_ty(tcx);
+ let place_ty = location.ty(mir, tcx).to_ty(tcx);
let rv_ty = value.ty(mir, tcx);
let locations = Locations {
from_location: term_location,
at_location: target.start_location(),
};
- if let Err(terr) = self.sub_types(rv_ty, lv_ty, locations) {
+ if let Err(terr) = self.sub_types(rv_ty, place_ty, locations) {
span_mirbug!(
self,
term,
"bad DropAndReplace ({:?} = {:?}): {:?}",
- lv_ty,
+ place_ty,
rv_ty,
terr
);
from_location: term_location,
at_location: unwind.start_location(),
};
- if let Err(terr) = self.sub_types(rv_ty, lv_ty, locations) {
+ if let Err(terr) = self.sub_types(rv_ty, place_ty, locations) {
span_mirbug!(
self,
term,
"bad DropAndReplace ({:?} = {:?}): {:?}",
- lv_ty,
+ place_ty,
rv_ty,
terr
);
/// its successor within the block is the at-location. This means
/// that any required region relationships must hold only upon
/// **exiting** the statement/terminator indicated by `self`. This
- /// is for example used when you have a `lv = rv` statement: it
- /// indicates that the `typeof(rv) <: typeof(lv)` as of the
+ /// is for example used when you have a `place = rv` statement: it
+ /// indicates that the `typeof(rv) <: typeof(place)` as of the
/// **next** statement.
fn at_successor_within_block(self) -> Locations;
}
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
-/// Return `true` if this lvalue is allowed to be less aligned
+/// Return `true` if this place is allowed to be less aligned
/// than its containing struct (because it is within a packed
/// struct).
pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
local_decls: &L,
param_env: ty::ParamEnv<'tcx>,
- lvalue: &Place<'tcx>)
+ place: &Place<'tcx>)
-> bool
where L: HasLocalDecls<'tcx>
{
- debug!("is_disaligned({:?})", lvalue);
- if !is_within_packed(tcx, local_decls, lvalue) {
- debug!("is_disaligned({:?}) - not within packed", lvalue);
+ debug!("is_disaligned({:?})", place);
+ if !is_within_packed(tcx, local_decls, place) {
+ debug!("is_disaligned({:?}) - not within packed", place);
return false
}
- let ty = lvalue.ty(local_decls, tcx).to_ty(tcx);
+ let ty = place.ty(local_decls, tcx).to_ty(tcx);
match tcx.layout_raw(param_env.and(ty)) {
Ok(layout) if layout.align.abi() == 1 => {
// if the alignment is 1, the type can't be further
// disaligned.
- debug!("is_disaligned({:?}) - align = 1", lvalue);
+ debug!("is_disaligned({:?}) - align = 1", place);
false
}
_ => {
- debug!("is_disaligned({:?}) - true", lvalue);
+ debug!("is_disaligned({:?}) - true", place);
true
}
}
fn is_within_packed<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
local_decls: &L,
- lvalue: &Place<'tcx>)
+ place: &Place<'tcx>)
-> bool
where L: HasLocalDecls<'tcx>
{
- let mut lvalue = lvalue;
+ let mut place = place;
while let &Place::Projection(box Projection {
ref base, ref elem
- }) = lvalue {
+ }) = place {
match *elem {
// encountered a Deref, which is ABI-aligned
ProjectionElem::Deref => break,
}
_ => {}
}
- lvalue = base;
+ place = base;
}
false
where F: for<'a> FnMut(&'a mut Local,
PlaceContext<'tcx>,
Location) {
- for lvalue_use in &self.info[local].defs_and_uses {
+ for place_use in &self.info[local].defs_and_uses {
MutateUseVisitor::new(local,
&mut callback,
- mir).visit_location(mir, lvalue_use.location)
+ mir).visit_location(mir, place_use.location)
}
}
}
pub fn def_count(&self) -> usize {
- self.defs_and_uses.iter().filter(|lvalue_use| lvalue_use.context.is_mutating_use()).count()
+ self.defs_and_uses.iter().filter(|place_use| place_use.context.is_mutating_use()).count()
}
pub fn def_count_not_including_drop(&self) -> usize {
pub fn defs_not_including_drop(
&self,
) -> iter::Filter<slice::Iter<Use<'tcx>>, fn(&&Use<'tcx>) -> bool> {
- self.defs_and_uses.iter().filter(|lvalue_use| {
- lvalue_use.context.is_mutating_use() && !lvalue_use.context.is_drop()
+ self.defs_and_uses.iter().filter(|place_use| {
+ place_use.context.is_mutating_use() && !place_use.context.is_drop()
})
}
pub fn use_count(&self) -> usize {
- self.defs_and_uses.iter().filter(|lvalue_use| {
- lvalue_use.context.is_nonmutating_use()
+ self.defs_and_uses.iter().filter(|place_use| {
+ place_use.context.is_nonmutating_use()
}).count()
}
}
source_info: SourceInfo,
- lvalue: &'l Place<'tcx>,
+ place: &'l Place<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
pub fn elaborate_drop<'b, 'tcx, D>(
elaborator: &mut D,
source_info: SourceInfo,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
path: D::Path,
succ: BasicBlock,
unwind: Unwind,
where D: DropElaborator<'b, 'tcx>
{
DropCtxt {
- elaborator, source_info, lvalue, path, succ, unwind
+ elaborator, source_info, place, path, succ, unwind
}.elaborate_drop(bb)
}
impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
where D: DropElaborator<'b, 'tcx>
{
- fn lvalue_ty(&self, lvalue: &Place<'tcx>) -> Ty<'tcx> {
- lvalue.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
+ fn place_ty(&self, place: &Place<'tcx>) -> Ty<'tcx> {
+ place.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx())
}
fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> {
let loc = self.terminator_loc(bb);
self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Drop {
- location: self.lvalue.clone(),
+ location: self.place.clone(),
target: self.succ,
unwind: self.unwind.into_option(),
});
}
}
- /// Return the lvalue and move path for each field of `variant`,
+ /// Return the place and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
fn move_paths_for_fields(&self,
- base_lv: &Place<'tcx>,
+ base_place: &Place<'tcx>,
variant_path: D::Path,
variant: &'tcx ty::VariantDef,
substs: &'tcx Substs<'tcx>)
&f.ty(self.tcx(), substs),
self.elaborator.param_env()
);
- (base_lv.clone().field(field, field_ty), subpath)
+ (base_place.clone().field(field, field_ty), subpath)
}).collect()
}
fn drop_subpath(&mut self,
- lvalue: &Place<'tcx>,
+ place: &Place<'tcx>,
path: Option<D::Path>,
succ: BasicBlock,
unwind: Unwind)
-> BasicBlock
{
if let Some(path) = path {
- debug!("drop_subpath: for std field {:?}", lvalue);
+ debug!("drop_subpath: for std field {:?}", place);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
- path, lvalue, succ, unwind,
+ path, place, succ, unwind,
}.elaborated_drop_block()
} else {
- debug!("drop_subpath: for rest field {:?}", lvalue);
+ debug!("drop_subpath: for rest field {:?}", place);
DropCtxt {
elaborator: self.elaborator,
source_info: self.source_info,
- lvalue, succ, unwind,
+ place, succ, unwind,
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path
{
Some(succ).into_iter().chain(
fields.iter().rev().zip(unwind_ladder)
- .map(|(&(ref lv, path), &unwind_succ)| {
- succ = self.drop_subpath(lv, path, succ, unwind_succ);
+ .map(|(&(ref place, path), &unwind_succ)| {
+ succ = self.drop_subpath(place, path, succ, unwind_succ);
succ
})
).collect()
debug!("drop_ladder({:?}, {:?})", self, fields);
let mut fields = fields;
- fields.retain(|&(ref lvalue, _)| {
- self.lvalue_ty(lvalue).needs_drop(self.tcx(), self.elaborator.param_env())
+ fields.retain(|&(ref place, _)| {
+ self.place_ty(place).needs_drop(self.tcx(), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
- (self.lvalue.clone().field(Field::new(i), ty),
+ (self.place.clone().field(Field::new(i), ty),
self.elaborator.field_subpath(self.path, Field::new(i)))
}).collect();
{
debug!("open_drop_for_box({:?}, {:?})", self, ty);
- let interior = self.lvalue.clone().deref();
+ let interior = self.place.clone().deref();
let interior_path = self.elaborator.deref_subpath(self.path);
let succ = self.succ; // FIXME(#6393)
let (succ, unwind) = self.drop_ladder_bottom();
if !adt.is_enum() {
let fields = self.move_paths_for_fields(
- self.lvalue,
+ self.place,
self.path,
&adt.variants[0],
substs
let subpath = self.elaborator.downcast_subpath(
self.path, variant_index);
if let Some(variant_path) = subpath {
- let base_lv = self.lvalue.clone().elem(
+ let base_place = self.place.clone().elem(
ProjectionElem::Downcast(adt, variant_index)
);
let fields = self.move_paths_for_fields(
- &base_lv,
+ &base_place,
variant_path,
&adt.variants[variant_index],
substs);
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Place::Local(self.new_temp(discr_ty));
- let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
+ let discr_rv = Rvalue::Discriminant(self.place.clone());
let switch_block = BasicBlockData {
statements: vec![self.assign(&discr, discr_rv)],
terminator: Some(Terminator {
let tcx = self.tcx();
let drop_trait = tcx.lang_items().drop_trait().unwrap();
let drop_fn = tcx.associated_items(drop_trait).next().unwrap();
- let ty = self.lvalue_ty(self.lvalue);
+ let ty = self.place_ty(self.place);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty,
mutbl: hir::Mutability::MutMutable
});
- let ref_lvalue = self.new_temp(ref_ty);
+ let ref_place = self.new_temp(ref_ty);
let unit_temp = Place::Local(self.new_temp(tcx.mk_nil()));
let result = BasicBlockData {
statements: vec![self.assign(
- &Place::Local(ref_lvalue),
- Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone())
+ &Place::Local(ref_place),
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.place.clone())
)],
terminator: Some(Terminator {
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, drop_fn.def_id, substs,
self.source_info.span),
- args: vec![Operand::Move(Place::Local(ref_lvalue))],
+ args: vec![Operand::Move(Place::Local(ref_place))],
destination: Some((unit_temp, succ)),
cleanup: unwind.into_option(),
},
ptr_based: bool)
-> BasicBlock
{
- let copy = |lv: &Place<'tcx>| Operand::Copy(lv.clone());
- let move_ = |lv: &Place<'tcx>| Operand::Move(lv.clone());
+ let copy = |place: &Place<'tcx>| Operand::Copy(place.clone());
+ let move_ = |place: &Place<'tcx>| Operand::Move(place.clone());
let tcx = self.tcx();
let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
(Rvalue::Ref(
tcx.types.re_erased,
BorrowKind::Mut,
- self.lvalue.clone().index(cur)),
+ self.place.clone().index(cur)),
Rvalue::BinaryOp(BinOp::Add, copy(&Place::Local(cur)), one))
};
let tcx = self.tcx();
- let move_ = |lv: &Place<'tcx>| Operand::Move(lv.clone());
+ let move_ = |place: &Place<'tcx>| Operand::Move(place.clone());
let size = &Place::Local(self.new_temp(tcx.types.usize));
let size_is_zero = &Place::Local(self.new_temp(tcx.types.bool));
let base_block = BasicBlockData {
self.elaborator.patch().new_block(base_block)
}
- // create a pair of drop-loops of `lvalue`, which drops its contents
+ // create a pair of drop-loops of `place`, which drops its contents
// even in the case of 1 panic. If `ptr_based`, create a pointer loop,
// otherwise create an index loop.
fn drop_loop_pair(&mut self, ety: Ty<'tcx>, ptr_based: bool) -> BasicBlock {
let cur = Place::Local(cur);
let zero = self.constant_usize(0);
let mut drop_block_stmts = vec![];
- drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.lvalue.clone())));
+ drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.place.clone())));
if ptr_based {
- let tmp_ty = tcx.mk_mut_ptr(self.lvalue_ty(self.lvalue));
+ let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
let tmp = Place::Local(self.new_temp(tmp_ty));
// tmp = &LV;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
- tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone()
+ tcx.types.re_erased, BorrowKind::Mut, self.place.clone()
)));
drop_block_stmts.push(self.assign(&cur, Rvalue::Cast(
CastKind::Misc, Operand::Move(tmp.clone()), iter_ty
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case or if one of the destructors fail.
fn open_drop<'a>(&mut self) -> BasicBlock {
- let ty = self.lvalue_ty(self.lvalue);
+ let ty = self.place_ty(self.place);
match ty.sty {
ty::TyClosure(def_id, substs) |
// Note that `elaborate_drops` only drops the upvars of a generator,
}
}
- /// Return a basic block that drop an lvalue using the context
+ /// Return a basic block that drops a place using the context
/// and path in `c`. If `mode` is something, also clear `c`
/// according to it.
///
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
- /// drop(self.lv)
+ /// drop(self.place)
fn complete_drop<'a>(&mut self,
drop_mode: Option<DropFlagMode>,
succ: BasicBlock,
let call = TerminatorKind::Call {
func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
- args: vec![Operand::Move(self.lvalue.clone())],
+ args: vec![Operand::Move(self.place.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
fn drop_block<'a>(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
let block = TerminatorKind::Drop {
- location: self.lvalue.clone(),
+ location: self.place.clone(),
target,
unwind: unwind.into_option()
};
self.new_statements.push((loc, stmt));
}
- pub fn add_assign(&mut self, loc: Location, lv: Place<'tcx>, rv: Rvalue<'tcx>) {
- self.add_statement(loc, StatementKind::Assign(lv, rv));
+ pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+ self.add_statement(loc, StatementKind::Assign(place, rv));
}
pub fn apply(self, mir: &mut Mir<'tcx>) {
}
}
- // Print return pointer
+ // Print return place
let indented_retptr = format!("{}let mut {:?}: {};",
INDENT,
- RETURN_POINTER,
- mir.local_decls[RETURN_POINTER].ty);
- writeln!(w, "{0:1$} // return pointer",
+ RETURN_PLACE,
+ mir.local_decls[RETURN_PLACE].ty);
+ writeln!(w, "{0:1$} // return place",
indented_retptr,
ALIGN)?;
self.super_operand(operand, location);
}
- fn visit_lvalue(&mut self,
- lvalue: &Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &Place<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
- self.record("Place", lvalue);
- self.record(match *lvalue {
+ self.record("Place", place);
+ self.record(match *place {
Place::Local(..) => "Place::Local",
Place::Static(..) => "Place::Static",
Place::Projection(..) => "Place::Projection",
- }, lvalue);
- self.super_lvalue(lvalue, context, location);
+ }, place);
+ self.super_place(place, context, location);
}
fn visit_projection(&mut self,
- lvalue: &PlaceProjection<'tcx>,
+ place: &PlaceProjection<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
- self.record("PlaceProjection", lvalue);
- self.super_projection(lvalue, context, location);
+ self.record("PlaceProjection", place);
+ self.super_projection(place, context, location);
}
fn visit_projection_elem(&mut self,
- lvalue: &PlaceElem<'tcx>,
+ place: &PlaceElem<'tcx>,
context: mir_visit::PlaceContext<'tcx>,
location: Location) {
- self.record("PlaceElem", lvalue);
- self.record(match *lvalue {
+ self.record("PlaceElem", place);
+ self.record(match *place {
ProjectionElem::Deref => "PlaceElem::Deref",
ProjectionElem::Subslice { .. } => "PlaceElem::Subslice",
ProjectionElem::Field(..) => "PlaceElem::Field",
ProjectionElem::Index(..) => "PlaceElem::Index",
ProjectionElem::ConstantIndex { .. } => "PlaceElem::ConstantIndex",
ProjectionElem::Downcast(..) => "PlaceElem::Downcast",
- }, lvalue);
- self.super_projection_elem(lvalue, context, location);
+ }, place);
+ self.super_projection_elem(place, context, location);
}
fn visit_constant(&mut self,
use cabi_nvptx;
use cabi_nvptx64;
use cabi_hexagon;
-use mir::lvalue::{Alignment, PlaceRef};
+use mir::place::{Alignment, PlaceRef};
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
self.mode == PassMode::Ignore
}
- /// Get the LLVM type for an lvalue of the original Rust type of
+ /// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
self.layout.llvm_type(ccx)
}
/// Store a direct/indirect value described by this ArgType into a
- /// lvalue for the original Rust type of this argument/return.
+ /// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
use rustc::hir;
-use mir::lvalue::PlaceRef;
+use mir::place::PlaceRef;
use mir::operand::OperandValue;
use std::ffi::CString;
// Prepare the output operands
let mut indirect_outputs = vec![];
- for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() {
+ for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
- inputs.push(lvalue.load(bcx).immediate());
+ inputs.push(place.load(bcx).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
- indirect_outputs.push(lvalue.load(bcx).immediate());
+ indirect_outputs.push(place.load(bcx).immediate());
} else {
- output_types.push(lvalue.layout.llvm_type(bcx.ccx));
+ output_types.push(place.layout.llvm_type(bcx.ccx));
}
}
if !indirect_outputs.is_empty() {
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
- for (i, (_, &lvalue)) in outputs.enumerate() {
+ for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
- OperandValue::Immediate(v).store(bcx, lvalue);
+ OperandValue::Immediate(v).store(bcx, place);
}
// Store mark in a metadata node so we can map LLVM errors
use rustc::session::Session;
use rustc_incremental;
use allocator;
-use mir::lvalue::PlaceRef;
+use mir::place::PlaceRef;
use attributes;
use builder::Builder;
use callee;
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType, PassMode};
-use mir::lvalue::{PlaceRef, Alignment};
+use mir::place::{PlaceRef, Alignment};
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use type_of::LayoutLlvmExt;
use super::MirContext;
-pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
+pub fn memory_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
let mir = mircx.mir;
let mut analyzer = LocalAnalyzer::new(mircx);
// (e.g. structs) into an alloca unconditionally, just so
// that we don't have to deal with having two pathways
// (gep vs extractvalue etc).
- analyzer.mark_as_lvalue(mir::Local::new(index));
+ analyzer.mark_as_memory(mir::Local::new(index));
}
}
- analyzer.lvalue_locals
+ analyzer.memory_locals
}
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
cx: &'mir MirContext<'a, 'tcx>,
- lvalue_locals: BitVector,
+ memory_locals: BitVector,
seen_assigned: BitVector
}
fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
let mut analyzer = LocalAnalyzer {
cx: mircx,
- lvalue_locals: BitVector::new(mircx.mir.local_decls.len()),
+ memory_locals: BitVector::new(mircx.mir.local_decls.len()),
seen_assigned: BitVector::new(mircx.mir.local_decls.len())
};
analyzer
}
- fn mark_as_lvalue(&mut self, local: mir::Local) {
- debug!("marking {:?} as lvalue", local);
- self.lvalue_locals.insert(local.index());
+ fn mark_as_memory(&mut self, local: mir::Local) {
+ debug!("marking {:?} as memory", local);
+ self.memory_locals.insert(local.index());
}
fn mark_assigned(&mut self, local: mir::Local) {
if !self.seen_assigned.insert(local.index()) {
- self.mark_as_lvalue(local);
+ self.mark_as_memory(local);
}
}
}
impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
- lvalue: &mir::Place<'tcx>,
+ place: &mir::Place<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
location: Location) {
- debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
+ debug!("visit_assign(block={:?}, place={:?}, rvalue={:?})", block, place, rvalue);
- if let mir::Place::Local(index) = *lvalue {
+ if let mir::Place::Local(index) = *place {
self.mark_assigned(index);
if !self.cx.rvalue_creates_operand(rvalue) {
- self.mark_as_lvalue(index);
+ self.mark_as_memory(index);
}
} else {
- self.visit_lvalue(lvalue, PlaceContext::Store, location);
+ self.visit_place(place, PlaceContext::Store, location);
}
self.visit_rvalue(rvalue, location);
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
- if let mir::Operand::Move(ref lvalue) = args[0] {
- self.visit_lvalue(lvalue, PlaceContext::Drop, location);
+ if let mir::Operand::Move(ref place) = args[0] {
+ self.visit_place(place, PlaceContext::Drop, location);
}
}
_ => {}
self.super_terminator_kind(block, kind, location);
}
- fn visit_lvalue(&mut self,
- lvalue: &mir::Place<'tcx>,
+ fn visit_place(&mut self,
+ place: &mir::Place<'tcx>,
context: PlaceContext<'tcx>,
location: Location) {
- debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
+ debug!("visit_place(place={:?}, context={:?})", place, context);
let ccx = self.cx.ccx;
- if let mir::Place::Projection(ref proj) = *lvalue {
+ if let mir::Place::Projection(ref proj) = *place {
// Allow uses of projections that are ZSTs or from scalar fields.
let is_consume = match context {
PlaceContext::Copy | PlaceContext::Move => true,
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
- // which would trigger `mark_as_lvalue` on locals.
- self.visit_lvalue(&proj.base, context, location);
+ // which would trigger `mark_as_memory` on locals.
+ self.visit_place(&proj.base, context, location);
return;
}
}
}
- // A deref projection only reads the pointer, never needs the lvalue.
+ // A deref projection only reads the pointer, never needs the place.
if let mir::ProjectionElem::Deref = proj.elem {
- return self.visit_lvalue(&proj.base, PlaceContext::Copy, location);
+ return self.visit_place(&proj.base, PlaceContext::Copy, location);
}
}
- self.super_lvalue(lvalue, context, location);
+ self.super_place(place, context, location);
}
fn visit_local(&mut self,
PlaceContext::Store |
PlaceContext::Borrow { .. } |
PlaceContext::Projection(..) => {
- self.mark_as_lvalue(index);
+ self.mark_as_memory(index);
}
PlaceContext::Drop => {
let ty = mir::Place::Local(index).ty(self.cx.mir, self.cx.ccx.tcx());
let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
- // Only need the lvalue if we're actually dropping it.
+ // Only need the place if we're actually dropping it.
if self.cx.ccx.shared().type_needs_drop(ty) {
- self.mark_as_lvalue(index);
+ self.mark_as_memory(index);
}
}
}
use super::{MirContext, LocalRef};
use super::constant::Const;
-use super::lvalue::{Alignment, PlaceRef};
+use super::place::{Alignment, PlaceRef};
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
}
PassMode::Direct(_) | PassMode::Pair(..) => {
- let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_POINTER));
+ let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
if let Ref(llval, align) = op.val {
bcx.load(llval, align.non_abi())
} else {
}
PassMode::Cast(cast_ty) => {
- let op = match self.locals[mir::RETURN_POINTER] {
+ let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
- LocalRef::Place(tr_lvalue) => {
+ LocalRef::Place(tr_place) => {
OperandRef {
- val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
- layout: tr_lvalue.layout
+ val: Ref(tr_place.llval, tr_place.alignment),
+ layout: tr_place.layout
}
}
};
}
Ref(llval, align) => {
assert_eq!(align, Alignment::AbiAligned,
- "return pointer is unaligned!");
+ "return place is unaligned!");
llval
}
};
return
}
- let lvalue = self.trans_lvalue(&bcx, location);
- let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
- args = &args[..1 + lvalue.has_extra() as usize];
+ let place = self.trans_place(&bcx, location);
+ let mut args: &[_] = &[place.llval, place.llextra];
+ args = &args[..1 + place.has_extra() as usize];
let (drop_fn, fn_ty) = match ty.sty {
ty::TyDynamic(..) => {
let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
args = &args[..1];
- (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
+ (meth::DESTRUCTOR.get_fn(&bcx, place.llextra, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bcx.ccx, drop_fn),
match self.locals[index] {
LocalRef::Place(dest) => dest,
LocalRef::Operand(None) => {
- // Handle temporary lvalues, specifically Operand ones, as
+ // Handle temporary places, specifically Operand ones, as
// they don't have allocas
return if fn_ret.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
};
}
LocalRef::Operand(Some(_)) => {
- bug!("lvalue local already assigned to");
+ bug!("place local already assigned to");
}
}
} else {
- self.trans_lvalue(bcx, dest)
+ self.trans_place(bcx, dest)
};
if fn_ret.is_indirect() {
match dest.alignment {
dst: &mir::Place<'tcx>) {
if let mir::Place::Local(index) = *dst {
match self.locals[index] {
- LocalRef::Place(lvalue) => self.trans_transmute_into(bcx, src, lvalue),
+ LocalRef::Place(place) => self.trans_transmute_into(bcx, src, place),
LocalRef::Operand(None) => {
- let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst));
+ let dst_layout = bcx.ccx.layout_of(self.monomorphized_place_ty(dst));
assert!(!dst_layout.ty.has_erasable_regions());
- let lvalue = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
- lvalue.storage_live(bcx);
- self.trans_transmute_into(bcx, src, lvalue);
- let op = lvalue.load(bcx);
- lvalue.storage_dead(bcx);
+ let place = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
+ place.storage_live(bcx);
+ self.trans_transmute_into(bcx, src, place);
+ let op = place.load(bcx);
+ place.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
LocalRef::Operand(Some(op)) => {
}
}
} else {
- let dst = self.trans_lvalue(bcx, dst);
+ let dst = self.trans_place(bcx, dst);
self.trans_transmute_into(bcx, src, dst);
}
}
Nothing,
// Store the return value to the pointer
Store(PlaceRef<'tcx>),
- // Stores an indirect return value to an operand local lvalue
+ // Stores an indirect return value to an operand local place
IndirectOperand(PlaceRef<'tcx>, mir::Local),
- // Stores a direct return value to an operand local lvalue
+ // Stores a direct return value to an operand local place
DirectOperand(mir::Local)
}
use std::fmt;
use std::ptr;
-use super::lvalue::Alignment;
+use super::place::Alignment;
use super::operand::{OperandRef, OperandValue};
use super::MirContext;
self.get_pair(ccx)
}
- fn as_lvalue(&self) -> ConstPlace<'tcx> {
+ fn as_place(&self) -> ConstPlace<'tcx> {
ConstPlace {
base: Base::Value(self.llval),
llextra: ptr::null_mut(),
Static(ValueRef)
}
-/// An lvalue as seen from a constant.
+/// A place as seen from a constant.
#[derive(Copy, Clone)]
struct ConstPlace<'tcx> {
base: Base,
mir::TerminatorKind::Goto { target } => target,
mir::TerminatorKind::Return => {
failure?;
- return self.locals[mir::RETURN_POINTER].clone().unwrap_or_else(|| {
+ return self.locals[mir::RETURN_PLACE].clone().unwrap_or_else(|| {
span_bug!(span, "no returned value in constant");
});
}
}
}
- fn const_lvalue(&self, lvalue: &mir::Place<'tcx>, span: Span)
+ fn const_place(&self, place: &mir::Place<'tcx>, span: Span)
-> Result<ConstPlace<'tcx>, ConstEvalErr<'tcx>> {
let tcx = self.ccx.tcx();
- if let mir::Place::Local(index) = *lvalue {
+ if let mir::Place::Local(index) = *place {
return self.locals[index].clone().unwrap_or_else(|| {
- span_bug!(span, "{:?} not initialized", lvalue)
- }).map(|v| v.as_lvalue());
+ span_bug!(span, "{:?} not initialized", place)
+ }).map(|v| v.as_place());
}
- let lvalue = match *lvalue {
+ let place = match *place {
mir::Place::Local(_) => bug!(), // handled above
mir::Place::Static(box mir::Static { def_id, ty }) => {
ConstPlace {
}
}
mir::Place::Projection(ref projection) => {
- let tr_base = self.const_lvalue(&projection.base, span)?;
+ let tr_base = self.const_place(&projection.base, span)?;
let projected_ty = PlaceTy::Ty { ty: tr_base.ty }
.projection_ty(tcx, &projection.elem);
let base = tr_base.to_const(span);
}
}
};
- Ok(lvalue)
+ Ok(place)
}
fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
-> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
debug!("const_operand({:?} @ {:?})", operand, span);
let result = match *operand {
- mir::Operand::Copy(ref lvalue) |
- mir::Operand::Move(ref lvalue) => {
- Ok(self.const_lvalue(lvalue, span)?.to_const(span))
+ mir::Operand::Copy(ref place) |
+ mir::Operand::Move(ref place) => {
+ Ok(self.const_place(place, span)?.to_const(span))
}
mir::Operand::Constant(ref constant) => {
Const::new(val, cast_ty)
}
- mir::Rvalue::Ref(_, bk, ref lvalue) => {
- let tr_lvalue = self.const_lvalue(lvalue, span)?;
+ mir::Rvalue::Ref(_, bk, ref place) => {
+ let tr_place = self.const_place(place, span)?;
- let ty = tr_lvalue.ty;
+ let ty = tr_place.ty;
let ref_ty = tcx.mk_ref(tcx.types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
- let base = match tr_lvalue.base {
+ let base = match tr_place.base {
Base::Value(llval) => {
// FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
let align = if self.ccx.shared().type_is_sized(ty) {
let ptr = if self.ccx.shared().type_is_sized(ty) {
base
} else {
- C_fat_ptr(self.ccx, base, tr_lvalue.llextra)
+ C_fat_ptr(self.ccx, base, tr_place.llextra)
};
Const::new(ptr, ref_ty)
}
- mir::Rvalue::Len(ref lvalue) => {
- let tr_lvalue = self.const_lvalue(lvalue, span)?;
- Const::new(tr_lvalue.len(self.ccx), tcx.types.usize)
+ mir::Rvalue::Len(ref place) => {
+ let tr_place = self.const_place(place, span)?;
+ Const::new(tr_place.len(self.ccx), tcx.types.usize)
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::{self, ValueRef};
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
-use rustc::mir;
-use rustc::mir::tcx::PlaceTy;
-use rustc_data_structures::indexed_vec::Idx;
-use base;
-use builder::Builder;
-use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big};
-use consts;
-use type_of::LayoutLlvmExt;
-use type_::Type;
-use value::Value;
-use glue;
-
-use std::ptr;
-use std::ops;
-
-use super::{MirContext, LocalRef};
-use super::operand::{OperandRef, OperandValue};
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Alignment {
- Packed(Align),
- AbiAligned,
-}
-
-impl ops::BitOr for Alignment {
- type Output = Self;
-
- fn bitor(self, rhs: Self) -> Self {
- match (self, rhs) {
- (Alignment::Packed(a), Alignment::Packed(b)) => {
- Alignment::Packed(a.min(b))
- }
- (Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => {
- Alignment::Packed(x)
- }
- (Alignment::AbiAligned, Alignment::AbiAligned) => {
- Alignment::AbiAligned
- }
- }
- }
-}
-
-impl<'a> From<TyLayout<'a>> for Alignment {
- fn from(layout: TyLayout) -> Self {
- if layout.is_packed() {
- Alignment::Packed(layout.align)
- } else {
- Alignment::AbiAligned
- }
- }
-}
-
-impl Alignment {
- pub fn non_abi(self) -> Option<Align> {
- match self {
- Alignment::Packed(x) => Some(x),
- Alignment::AbiAligned => None,
- }
- }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct PlaceRef<'tcx> {
- /// Pointer to the contents of the lvalue
- pub llval: ValueRef,
-
- /// This lvalue's extra data if it is unsized, or null
- pub llextra: ValueRef,
-
- /// Monomorphized type of this lvalue, including variant information
- pub layout: TyLayout<'tcx>,
-
- /// Whether this lvalue is known to be aligned according to its layout
- pub alignment: Alignment,
-}
-
-impl<'a, 'tcx> PlaceRef<'tcx> {
- pub fn new_sized(llval: ValueRef,
- layout: TyLayout<'tcx>,
- alignment: Alignment)
- -> PlaceRef<'tcx> {
- PlaceRef {
- llval,
- llextra: ptr::null_mut(),
- layout,
- alignment
- }
- }
-
- pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
- -> PlaceRef<'tcx> {
- debug!("alloca({:?}: {:?})", name, layout);
- let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align);
- Self::new_sized(tmp, layout, Alignment::AbiAligned)
- }
-
- pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
- if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
- if self.layout.is_unsized() {
- assert!(self.has_extra());
- assert_eq!(count, 0);
- self.llextra
- } else {
- C_usize(ccx, count)
- }
- } else {
- bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
- }
- }
-
- pub fn has_extra(&self) -> bool {
- !self.llextra.is_null()
- }
-
- pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
- debug!("PlaceRef::load: {:?}", self);
-
- assert!(!self.has_extra());
-
- if self.layout.is_zst() {
- return OperandRef::new_zst(bcx.ccx, self.layout);
- }
-
- let scalar_load_metadata = |load, scalar: &layout::Scalar| {
- let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
- let max_next = max.wrapping_add(1);
- let bits = scalar.value.size(bcx.ccx).bits();
- assert!(bits <= 128);
- let mask = !0u128 >> (128 - bits);
- // For a (max) value of -1, max will be `-1 as usize`, which overflows.
- // However, that is fine here (it would still represent the full range),
- // i.e., if the range is everything. The lo==hi case would be
- // rejected by the LLVM verifier (it would mean either an
- // empty set, which is impossible, or the entire range of the
- // type, which is pointless).
- match scalar.value {
- layout::Int(..) if max_next & mask != min & mask => {
- // llvm::ConstantRange can deal with ranges that wrap around,
- // so an overflow on (max + 1) is fine.
- bcx.range_metadata(load, min..max_next);
- }
- layout::Pointer if 0 < min && min < max => {
- bcx.nonnull_metadata(load);
- }
- _ => {}
- }
- };
-
- let val = if self.layout.is_llvm_immediate() {
- let mut const_llval = ptr::null_mut();
- unsafe {
- let global = llvm::LLVMIsAGlobalVariable(self.llval);
- if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
- const_llval = llvm::LLVMGetInitializer(global);
- }
- }
-
- let llval = if !const_llval.is_null() {
- const_llval
- } else {
- let load = bcx.load(self.llval, self.alignment.non_abi());
- if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
- scalar_load_metadata(load, scalar);
- }
- load
- };
- OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
- } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
- let load = |i, scalar: &layout::Scalar| {
- let mut llptr = bcx.struct_gep(self.llval, i as u64);
- // Make sure to always load i1 as i8.
- if scalar.is_bool() {
- llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
- }
- let load = bcx.load(llptr, self.alignment.non_abi());
- scalar_load_metadata(load, scalar);
- if scalar.is_bool() {
- bcx.trunc(load, Type::i1(bcx.ccx))
- } else {
- load
- }
- };
- OperandValue::Pair(load(0, a), load(1, b))
- } else {
- OperandValue::Ref(self.llval, self.alignment)
- };
-
- OperandRef { val, layout: self.layout }
- }
-
- /// Access a field, at a point when the value's case is known.
- pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
- let ccx = bcx.ccx;
- let field = self.layout.field(ccx, ix);
- let offset = self.layout.fields.offset(ix);
- let alignment = self.alignment | Alignment::from(self.layout);
-
- let simple = || {
- // Unions and newtypes only use an offset of 0.
- let llval = if offset.bytes() == 0 {
- self.llval
- } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
- // Offsets have to match either first or second field.
- assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
- bcx.struct_gep(self.llval, 1)
- } else {
- bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
- };
- PlaceRef {
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
- llextra: if ccx.shared().type_has_metadata(field.ty) {
- self.llextra
- } else {
- ptr::null_mut()
- },
- layout: field,
- alignment,
- }
- };
-
- // Simple case - we can just GEP the field
- // * Packed struct - There is no alignment padding
- // * Field is sized - pointer is properly aligned already
- if self.layout.is_packed() || !field.is_unsized() {
- return simple();
- }
-
- // If the type of the last field is [T], str or a foreign type, then we don't need to do
- // any adjusments
- match field.ty.sty {
- ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
- _ => ()
- }
-
- // There's no metadata available, log the case and just do the GEP.
- if !self.has_extra() {
- debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
- ix, Value(self.llval));
- return simple();
- }
-
- // We need to get the pointer manually now.
- // We do this by casting to a *i8, then offsetting it by the appropriate amount.
- // We do this instead of, say, simply adjusting the pointer from the result of a GEP
- // because the field may have an arbitrary alignment in the LLVM representation
- // anyway.
- //
- // To demonstrate:
- // struct Foo<T: ?Sized> {
- // x: u16,
- // y: T
- // }
- //
- // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
- // the `y` field has 16-bit alignment.
-
- let meta = self.llextra;
-
- let unaligned_offset = C_usize(ccx, offset.bytes());
-
- // Get the alignment of the field
- let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
-
- // Bump the unaligned offset up to the appropriate alignment using the
- // following expression:
- //
- // (unaligned offset + (align - 1)) & -align
-
- // Calculate offset
- let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64));
- let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
- bcx.neg(align));
-
- debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
-
- // Cast and adjust pointer
- let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
- let byte_ptr = bcx.gep(byte_ptr, &[offset]);
-
- // Finally, cast back to the type expected
- let ll_fty = field.llvm_type(ccx);
- debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
- PlaceRef {
- llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
- llextra: self.llextra,
- layout: field,
- alignment,
- }
- }
-
- /// Obtain the actual discriminant of a value.
- pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
- let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
- match self.layout.variants {
- layout::Variants::Single { index } => {
- return C_uint(cast_to, index as u64);
- }
- layout::Variants::Tagged { .. } |
- layout::Variants::NicheFilling { .. } => {},
- }
-
- let discr = self.project_field(bcx, 0);
- let lldiscr = discr.load(bcx).immediate();
- match self.layout.variants {
- layout::Variants::Single { .. } => bug!(),
- layout::Variants::Tagged { ref discr, .. } => {
- let signed = match discr.value {
- layout::Int(_, signed) => signed,
- _ => false
- };
- bcx.intcast(lldiscr, cast_to, signed)
- }
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx);
- if niche_variants.start == niche_variants.end {
- // FIXME(eddyb) Check the actual primitive type here.
- let niche_llval = if niche_start == 0 {
- // HACK(eddyb) Using `C_null` as it works on all types.
- C_null(niche_llty)
- } else {
- C_uint_big(niche_llty, niche_start)
- };
- bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval),
- C_uint(cast_to, niche_variants.start as u64),
- C_uint(cast_to, dataful_variant as u64))
- } else {
- // Rebase from niche values to discriminant values.
- let delta = niche_start.wrapping_sub(niche_variants.start as u128);
- let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta));
- let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
- bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
- bcx.intcast(lldiscr, cast_to, false),
- C_uint(cast_to, dataful_variant as u64))
- }
- }
- }
- }
-
- /// Set the discriminant for a new value of the given case of the given
- /// representation.
- pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
- match self.layout.variants {
- layout::Variants::Single { index } => {
- if index != variant_index {
- // If the layout of an enum is `Single`, all
- // other variants are necessarily uninhabited.
- assert_eq!(self.layout.for_variant(bcx.ccx, variant_index).abi,
- layout::Abi::Uninhabited);
- }
- }
- layout::Variants::Tagged { .. } => {
- let ptr = self.project_field(bcx, 0);
- let to = self.layout.ty.ty_adt_def().unwrap()
- .discriminant_for_variant(bcx.tcx(), variant_index)
- .to_u128_unchecked() as u64;
- bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
- ptr.llval, ptr.alignment.non_abi());
- }
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- if variant_index != dataful_variant {
- if bcx.sess().target.target.arch == "arm" ||
- bcx.sess().target.target.arch == "aarch64" {
- // Issue #34427: As workaround for LLVM bug on ARM,
- // use memset of 0 before assigning niche value.
- let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
- let fill_byte = C_u8(bcx.ccx, 0);
- let (size, align) = self.layout.size_and_align();
- let size = C_usize(bcx.ccx, size.bytes());
- let align = C_u32(bcx.ccx, align.abi() as u32);
- base::call_memset(bcx, llptr, fill_byte, size, align, false);
- }
-
- let niche = self.project_field(bcx, 0);
- let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx);
- let niche_value = ((variant_index - niche_variants.start) as u128)
- .wrapping_add(niche_start);
- // FIXME(eddyb) Check the actual primitive type here.
- let niche_llval = if niche_value == 0 {
- // HACK(eddyb) Using `C_null` as it works on all types.
- C_null(niche_llty)
- } else {
- C_uint_big(niche_llty, niche_value)
- };
- OperandValue::Immediate(niche_llval).store(bcx, niche);
- }
- }
- }
- }
-
- pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
- -> PlaceRef<'tcx> {
- PlaceRef {
- llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]),
- llextra: ptr::null_mut(),
- layout: self.layout.field(bcx.ccx, 0),
- alignment: self.alignment
- }
- }
-
- pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
- -> PlaceRef<'tcx> {
- let mut downcast = *self;
- downcast.layout = self.layout.for_variant(bcx.ccx, variant_index);
-
- // Cast to the appropriate variant struct type.
- let variant_ty = downcast.layout.llvm_type(bcx.ccx);
- downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
-
- downcast
- }
-
- pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
- bcx.lifetime_start(self.llval, self.layout.size);
- }
-
- pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
- bcx.lifetime_end(self.llval, self.layout.size);
- }
-}
-
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
- pub fn trans_lvalue(&mut self,
- bcx: &Builder<'a, 'tcx>,
- lvalue: &mir::Place<'tcx>)
- -> PlaceRef<'tcx> {
- debug!("trans_lvalue(lvalue={:?})", lvalue);
-
- let ccx = bcx.ccx;
- let tcx = ccx.tcx();
-
- if let mir::Place::Local(index) = *lvalue {
- match self.locals[index] {
- LocalRef::Place(lvalue) => {
- return lvalue;
- }
- LocalRef::Operand(..) => {
- bug!("using operand local {:?} as lvalue", lvalue);
- }
- }
- }
-
- let result = match *lvalue {
- mir::Place::Local(_) => bug!(), // handled above
- mir::Place::Static(box mir::Static { def_id, ty }) => {
- PlaceRef::new_sized(consts::get_static(ccx, def_id),
- ccx.layout_of(self.monomorphize(&ty)),
- Alignment::AbiAligned)
- },
- mir::Place::Projection(box mir::Projection {
- ref base,
- elem: mir::ProjectionElem::Deref
- }) => {
- // Load the pointer from its location.
- self.trans_consume(bcx, base).deref(bcx.ccx)
- }
- mir::Place::Projection(ref projection) => {
- let tr_base = self.trans_lvalue(bcx, &projection.base);
-
- match projection.elem {
- mir::ProjectionElem::Deref => bug!(),
- mir::ProjectionElem::Field(ref field, _) => {
- tr_base.project_field(bcx, field.index())
- }
- mir::ProjectionElem::Index(index) => {
- let index = &mir::Operand::Copy(mir::Place::Local(index));
- let index = self.trans_operand(bcx, index);
- let llindex = index.immediate();
- tr_base.project_index(bcx, llindex)
- }
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: false,
- min_length: _ } => {
- let lloffset = C_usize(bcx.ccx, offset as u64);
- tr_base.project_index(bcx, lloffset)
- }
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: true,
- min_length: _ } => {
- let lloffset = C_usize(bcx.ccx, offset as u64);
- let lllen = tr_base.len(bcx.ccx);
- let llindex = bcx.sub(lllen, lloffset);
- tr_base.project_index(bcx, llindex)
- }
- mir::ProjectionElem::Subslice { from, to } => {
- let mut subslice = tr_base.project_index(bcx,
- C_usize(bcx.ccx, from as u64));
- let projected_ty = PlaceTy::Ty { ty: tr_base.layout.ty }
- .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
- subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty));
-
- if subslice.layout.is_unsized() {
- assert!(tr_base.has_extra());
- subslice.llextra = bcx.sub(tr_base.llextra,
- C_usize(bcx.ccx, (from as u64) + (to as u64)));
- }
-
- // Cast the lvalue pointer type to the new
- // array or slice type (*[%_; new_len]).
- subslice.llval = bcx.pointercast(subslice.llval,
- subslice.layout.llvm_type(bcx.ccx).ptr_to());
-
- subslice
- }
- mir::ProjectionElem::Downcast(_, v) => {
- tr_base.project_downcast(bcx, v)
- }
- }
- }
- };
- debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result);
- result
- }
-
- pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Place<'tcx>) -> Ty<'tcx> {
- let tcx = self.ccx.tcx();
- let lvalue_ty = lvalue.ty(self.mir, tcx);
- self.monomorphize(&lvalue_ty.to_ty(tcx))
- }
-}
-
pub use self::constant::trans_static_initializer;
use self::analyze::CleanupKind;
-use self::lvalue::{Alignment, PlaceRef};
+use self::place::{Alignment, PlaceRef};
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
/// - the type of the local must be judged "immediate" by `is_llvm_immediate`
/// - the operand must never be referenced indirectly
/// - we should not take its address using the `&` operator
- /// - nor should it appear in an lvalue path like `tmp.a`
+ /// - nor should it appear in a place path like `tmp.a`
/// - the operand must be defined by an rvalue that can generate immediate
/// values
///
},
};
- let lvalue_locals = analyze::lvalue_locals(&mircx);
+ let memory_locals = analyze::memory_locals(&mircx);
// Allocate variable and temp allocas
mircx.locals = {
- let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals);
+ let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &memory_locals);
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
let debug_scope = mircx.scopes[decl.source_info.scope];
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
- if !lvalue_locals.contains(local.index()) && !dbg {
+ if !memory_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
return LocalRef::new_operand(bcx.ccx, layout);
}
- debug!("alloc: {:?} ({}) -> lvalue", local, name);
- let lvalue = PlaceRef::alloca(&bcx, layout, &name.as_str());
+ debug!("alloc: {:?} ({}) -> place", local, name);
+ let place = PlaceRef::alloca(&bcx, layout, &name.as_str());
if dbg {
let (scope, span) = mircx.debug_loc(decl.source_info);
declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
- VariableAccess::DirectVariable { alloca: lvalue.llval },
+ VariableAccess::DirectVariable { alloca: place.llval },
VariableKind::LocalVariable, span);
}
- LocalRef::Place(lvalue)
+ LocalRef::Place(place)
} else {
- // Temporary or return pointer
- if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
- debug!("alloc: {:?} (return pointer) -> lvalue", local);
+ // Temporary or return place
+ if local == mir::RETURN_PLACE && mircx.fn_ty.ret.is_indirect() {
+ debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr,
layout,
Alignment::AbiAligned))
- } else if lvalue_locals.contains(local.index()) {
- debug!("alloc: {:?} -> lvalue", local);
+ } else if memory_locals.contains(local.index()) {
+ debug!("alloc: {:?} -> place", local);
LocalRef::Place(PlaceRef::alloca(&bcx, layout, &format!("{:?}", local)))
} else {
// If this is an immediate local, we do not create an
}
};
- let retptr = allocate_local(mir::RETURN_POINTER);
+ let retptr = allocate_local(mir::RETURN_PLACE);
iter::once(retptr)
.chain(args.into_iter())
.chain(mir.vars_and_temps_iter().map(allocate_local))
}
/// Produce, for each argument, a `ValueRef` pointing at the
-/// argument's value. As arguments are lvalues, these are always
+/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mircx: &MirContext<'a, 'tcx>,
scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
- lvalue_locals: &BitVector)
+ memory_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let tcx = bcx.tcx();
_ => bug!("spread argument isn't a tuple?!")
};
- let lvalue = PlaceRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
+ let place = PlaceRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
for i in 0..tupled_arg_tys.len() {
let arg = &mircx.fn_ty.args[idx];
idx += 1;
- arg.store_fn_arg(bcx, &mut llarg_idx, lvalue.project_field(bcx, i));
+ arg.store_fn_arg(bcx, &mut llarg_idx, place.project_field(bcx, i));
}
// Now that we have one alloca that contains the aggregate value,
// we can create one debuginfo entry for the argument.
arg_scope.map(|scope| {
let variable_access = VariableAccess::DirectVariable {
- alloca: lvalue.llval
+ alloca: place.llval
};
declare_local(
bcx,
);
});
- return LocalRef::Place(lvalue);
+ return LocalRef::Place(place);
}
let arg = &mircx.fn_ty.args[idx];
llarg_idx += 1;
}
- if arg_scope.is_none() && !lvalue_locals.contains(local.index()) {
+ if arg_scope.is_none() && !memory_locals.contains(local.index()) {
// We don't have to cast or keep the argument in the alloca.
// FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
// of putting everything in allocas just so we can use llvm.dbg.declare.
}
}
- let lvalue = if arg.is_indirect() {
+ let place = if arg.is_indirect() {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up.
// FIXME: lifetimes
// need to insert a deref here, but the C ABI uses a pointer and a copy using the
// byval attribute, for which LLVM does the deref itself, so we must not add it.
let mut variable_access = VariableAccess::DirectVariable {
- alloca: lvalue.llval
+ alloca: place.llval
};
if let PassMode::Indirect(ref attrs) = arg.mode {
if !attrs.contains(ArgAttribute::ByVal) {
variable_access = VariableAccess::IndirectVariable {
- alloca: lvalue.llval,
+ alloca: place.llval,
address_operations: &deref_op,
};
}
let alloc = PlaceRef::alloca(bcx,
bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
"__debuginfo_env_ptr");
- bcx.store(lvalue.llval, alloc.llval, None);
+ bcx.store(place.llval, alloc.llval, None);
alloc.llval
} else {
- lvalue.llval
+ place.llval
};
for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
);
}
});
- LocalRef::Place(lvalue)
+ LocalRef::Place(place)
}).collect()
}
mod analyze;
mod block;
mod constant;
-pub mod lvalue;
+pub mod place;
pub mod operand;
mod rvalue;
mod statement;
use std::ptr;
use super::{MirContext, LocalRef};
-use super::lvalue::{Alignment, PlaceRef};
+use super::place::{Alignment, PlaceRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
impl<'a, 'tcx> MirContext<'a, 'tcx> {
fn maybe_trans_consume_direct(&mut self,
bcx: &Builder<'a, 'tcx>,
- lvalue: &mir::Place<'tcx>)
+ place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx>>
{
- debug!("maybe_trans_consume_direct(lvalue={:?})", lvalue);
+ debug!("maybe_trans_consume_direct(place={:?})", place);
// watch out for locals that do not have an
// alloca; they are handled somewhat differently
- if let mir::Place::Local(index) = *lvalue {
+ if let mir::Place::Local(index) = *place {
match self.locals[index] {
LocalRef::Operand(Some(o)) => {
return Some(o);
}
LocalRef::Operand(None) => {
- bug!("use of {:?} before def", lvalue);
+ bug!("use of {:?} before def", place);
}
LocalRef::Place(..) => {
// use path below
}
// Moves out of scalar and scalar pair fields are trivial.
- if let &mir::Place::Projection(ref proj) = lvalue {
+ if let &mir::Place::Projection(ref proj) = place {
if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
return Some(o.extract_field(bcx, f.index()));
pub fn trans_consume(&mut self,
bcx: &Builder<'a, 'tcx>,
- lvalue: &mir::Place<'tcx>)
+ place: &mir::Place<'tcx>)
-> OperandRef<'tcx>
{
- debug!("trans_consume(lvalue={:?})", lvalue);
+ debug!("trans_consume(place={:?})", place);
- let ty = self.monomorphized_lvalue_ty(lvalue);
+ let ty = self.monomorphized_place_ty(place);
let layout = bcx.ccx.layout_of(ty);
// ZSTs don't require any actual memory access.
return OperandRef::new_zst(bcx.ccx, layout);
}
- if let Some(o) = self.maybe_trans_consume_direct(bcx, lvalue) {
+ if let Some(o) = self.maybe_trans_consume_direct(bcx, place) {
return o;
}
- // for most lvalues, to consume them we just load them
+ // for most places, to consume them we just load them
// out from their home
- self.trans_lvalue(bcx, lvalue).load(bcx)
+ self.trans_place(bcx, place).load(bcx)
}
pub fn trans_operand(&mut self,
debug!("trans_operand(operand={:?})", operand);
match *operand {
- mir::Operand::Copy(ref lvalue) |
- mir::Operand::Move(ref lvalue) => {
- self.trans_consume(bcx, lvalue)
+ mir::Operand::Copy(ref place) |
+ mir::Operand::Move(ref place) => {
+ self.trans_consume(bcx, place)
}
mir::Operand::Constant(ref constant) => {
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm::{self, ValueRef};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
+use rustc::mir;
+use rustc::mir::tcx::PlaceTy;
+use rustc_data_structures::indexed_vec::Idx;
+use base;
+use builder::Builder;
+use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big};
+use consts;
+use type_of::LayoutLlvmExt;
+use type_::Type;
+use value::Value;
+use glue;
+
+use std::ptr;
+use std::ops;
+
+use super::{MirContext, LocalRef};
+use super::operand::{OperandRef, OperandValue};
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Alignment {
+ Packed(Align),
+ AbiAligned,
+}
+
+impl ops::BitOr for Alignment {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Self {
+ match (self, rhs) {
+ (Alignment::Packed(a), Alignment::Packed(b)) => {
+ Alignment::Packed(a.min(b))
+ }
+ (Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => {
+ Alignment::Packed(x)
+ }
+ (Alignment::AbiAligned, Alignment::AbiAligned) => {
+ Alignment::AbiAligned
+ }
+ }
+ }
+}
+
+impl<'a> From<TyLayout<'a>> for Alignment {
+ fn from(layout: TyLayout) -> Self {
+ if layout.is_packed() {
+ Alignment::Packed(layout.align)
+ } else {
+ Alignment::AbiAligned
+ }
+ }
+}
+
+impl Alignment {
+ pub fn non_abi(self) -> Option<Align> {
+ match self {
+ Alignment::Packed(x) => Some(x),
+ Alignment::AbiAligned => None,
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx> {
+ /// Pointer to the contents of the place
+ pub llval: ValueRef,
+
+ /// This place's extra data if it is unsized, or null
+ pub llextra: ValueRef,
+
+ /// Monomorphized type of this place, including variant information
+ pub layout: TyLayout<'tcx>,
+
+ /// Whether this place is known to be aligned according to its layout
+ pub alignment: Alignment,
+}
+
+impl<'a, 'tcx> PlaceRef<'tcx> {
+ pub fn new_sized(llval: ValueRef,
+ layout: TyLayout<'tcx>,
+ alignment: Alignment)
+ -> PlaceRef<'tcx> {
+ PlaceRef {
+ llval,
+ llextra: ptr::null_mut(),
+ layout,
+ alignment
+ }
+ }
+
+ pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
+ -> PlaceRef<'tcx> {
+ debug!("alloca({:?}: {:?})", name, layout);
+ let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align);
+ Self::new_sized(tmp, layout, Alignment::AbiAligned)
+ }
+
+ pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
+ if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
+ if self.layout.is_unsized() {
+ assert!(self.has_extra());
+ assert_eq!(count, 0);
+ self.llextra
+ } else {
+ C_usize(ccx, count)
+ }
+ } else {
+ bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+ }
+ }
+
+ pub fn has_extra(&self) -> bool {
+ !self.llextra.is_null()
+ }
+
+ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
+ debug!("PlaceRef::load: {:?}", self);
+
+ assert!(!self.has_extra());
+
+ if self.layout.is_zst() {
+ return OperandRef::new_zst(bcx.ccx, self.layout);
+ }
+
+ let scalar_load_metadata = |load, scalar: &layout::Scalar| {
+ let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
+ let max_next = max.wrapping_add(1);
+ let bits = scalar.value.size(bcx.ccx).bits();
+ assert!(bits <= 128);
+ let mask = !0u128 >> (128 - bits);
+ // For a (max) value of -1, max will be `-1 as usize`, which overflows.
+ // However, that is fine here (it would still represent the full range),
+ // i.e., if the range is everything. The lo==hi case would be
+ // rejected by the LLVM verifier (it would mean either an
+ // empty set, which is impossible, or the entire range of the
+ // type, which is pointless).
+ match scalar.value {
+ layout::Int(..) if max_next & mask != min & mask => {
+ // llvm::ConstantRange can deal with ranges that wrap around,
+ // so an overflow on (max + 1) is fine.
+ bcx.range_metadata(load, min..max_next);
+ }
+ layout::Pointer if 0 < min && min < max => {
+ bcx.nonnull_metadata(load);
+ }
+ _ => {}
+ }
+ };
+
+ let val = if self.layout.is_llvm_immediate() {
+ let mut const_llval = ptr::null_mut();
+ unsafe {
+ let global = llvm::LLVMIsAGlobalVariable(self.llval);
+ if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
+ const_llval = llvm::LLVMGetInitializer(global);
+ }
+ }
+
+ let llval = if !const_llval.is_null() {
+ const_llval
+ } else {
+ let load = bcx.load(self.llval, self.alignment.non_abi());
+ if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
+ scalar_load_metadata(load, scalar);
+ }
+ load
+ };
+ OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
+ } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+ let load = |i, scalar: &layout::Scalar| {
+ let mut llptr = bcx.struct_gep(self.llval, i as u64);
+ // Make sure to always load i1 as i8.
+ if scalar.is_bool() {
+ llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
+ }
+ let load = bcx.load(llptr, self.alignment.non_abi());
+ scalar_load_metadata(load, scalar);
+ if scalar.is_bool() {
+ bcx.trunc(load, Type::i1(bcx.ccx))
+ } else {
+ load
+ }
+ };
+ OperandValue::Pair(load(0, a), load(1, b))
+ } else {
+ OperandValue::Ref(self.llval, self.alignment)
+ };
+
+ OperandRef { val, layout: self.layout }
+ }
+
+ /// Access a field, at a point when the value's case is known.
+ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
+ let ccx = bcx.ccx;
+ let field = self.layout.field(ccx, ix);
+ let offset = self.layout.fields.offset(ix);
+ let alignment = self.alignment | Alignment::from(self.layout);
+
+ let simple = || {
+ // Unions and newtypes only use an offset of 0.
+ let llval = if offset.bytes() == 0 {
+ self.llval
+ } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+ // Offsets have to match either first or second field.
+ assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
+ bcx.struct_gep(self.llval, 1)
+ } else {
+ bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
+ };
+ PlaceRef {
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
+ llextra: if ccx.shared().type_has_metadata(field.ty) {
+ self.llextra
+ } else {
+ ptr::null_mut()
+ },
+ layout: field,
+ alignment,
+ }
+ };
+
+ // Simple case - we can just GEP the field
+ // * Packed struct - There is no alignment padding
+ // * Field is sized - pointer is properly aligned already
+ if self.layout.is_packed() || !field.is_unsized() {
+ return simple();
+ }
+
+ // If the type of the last field is [T], str or a foreign type, then we don't need to do
+        // any adjustments
+ match field.ty.sty {
+ ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
+ _ => ()
+ }
+
+ // There's no metadata available, log the case and just do the GEP.
+ if !self.has_extra() {
+ debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
+ ix, Value(self.llval));
+ return simple();
+ }
+
+ // We need to get the pointer manually now.
+ // We do this by casting to a *i8, then offsetting it by the appropriate amount.
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+ // because the field may have an arbitrary alignment in the LLVM representation
+ // anyway.
+ //
+ // To demonstrate:
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
+ // the `y` field has 16-bit alignment.
+
+ let meta = self.llextra;
+
+ let unaligned_offset = C_usize(ccx, offset.bytes());
+
+ // Get the alignment of the field
+ let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment using the
+ // following expression:
+ //
+ // (unaligned offset + (align - 1)) & -align
+
+ // Calculate offset
+ let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64));
+ let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
+ bcx.neg(align));
+
+ debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
+
+ // Cast and adjust pointer
+ let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
+ let byte_ptr = bcx.gep(byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected
+ let ll_fty = field.llvm_type(ccx);
+ debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+ PlaceRef {
+ llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
+ llextra: self.llextra,
+ layout: field,
+ alignment,
+ }
+ }
+
+ /// Obtain the actual discriminant of a value.
+ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
+ let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
+ match self.layout.variants {
+ layout::Variants::Single { index } => {
+ return C_uint(cast_to, index as u64);
+ }
+ layout::Variants::Tagged { .. } |
+ layout::Variants::NicheFilling { .. } => {},
+ }
+
+ let discr = self.project_field(bcx, 0);
+ let lldiscr = discr.load(bcx).immediate();
+ match self.layout.variants {
+ layout::Variants::Single { .. } => bug!(),
+ layout::Variants::Tagged { ref discr, .. } => {
+ let signed = match discr.value {
+ layout::Int(_, signed) => signed,
+ _ => false
+ };
+ bcx.intcast(lldiscr, cast_to, signed)
+ }
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
+ } => {
+ let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx);
+ if niche_variants.start == niche_variants.end {
+ // FIXME(eddyb) Check the actual primitive type here.
+ let niche_llval = if niche_start == 0 {
+ // HACK(eddyb) Using `C_null` as it works on all types.
+ C_null(niche_llty)
+ } else {
+ C_uint_big(niche_llty, niche_start)
+ };
+ bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval),
+ C_uint(cast_to, niche_variants.start as u64),
+ C_uint(cast_to, dataful_variant as u64))
+ } else {
+ // Rebase from niche values to discriminant values.
+ let delta = niche_start.wrapping_sub(niche_variants.start as u128);
+ let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta));
+ let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
+ bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
+ bcx.intcast(lldiscr, cast_to, false),
+ C_uint(cast_to, dataful_variant as u64))
+ }
+ }
+ }
+ }
+
+ /// Set the discriminant for a new value of the given case of the given
+ /// representation.
+ pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
+ match self.layout.variants {
+ layout::Variants::Single { index } => {
+ if index != variant_index {
+ // If the layout of an enum is `Single`, all
+ // other variants are necessarily uninhabited.
+ assert_eq!(self.layout.for_variant(bcx.ccx, variant_index).abi,
+ layout::Abi::Uninhabited);
+ }
+ }
+ layout::Variants::Tagged { .. } => {
+ let ptr = self.project_field(bcx, 0);
+ let to = self.layout.ty.ty_adt_def().unwrap()
+ .discriminant_for_variant(bcx.tcx(), variant_index)
+ .to_u128_unchecked() as u64;
+ bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
+ ptr.llval, ptr.alignment.non_abi());
+ }
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
+ } => {
+ if variant_index != dataful_variant {
+ if bcx.sess().target.target.arch == "arm" ||
+ bcx.sess().target.target.arch == "aarch64" {
+ // Issue #34427: As workaround for LLVM bug on ARM,
+ // use memset of 0 before assigning niche value.
+ let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
+ let fill_byte = C_u8(bcx.ccx, 0);
+ let (size, align) = self.layout.size_and_align();
+ let size = C_usize(bcx.ccx, size.bytes());
+ let align = C_u32(bcx.ccx, align.abi() as u32);
+ base::call_memset(bcx, llptr, fill_byte, size, align, false);
+ }
+
+ let niche = self.project_field(bcx, 0);
+ let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx);
+ let niche_value = ((variant_index - niche_variants.start) as u128)
+ .wrapping_add(niche_start);
+ // FIXME(eddyb) Check the actual primitive type here.
+ let niche_llval = if niche_value == 0 {
+ // HACK(eddyb) Using `C_null` as it works on all types.
+ C_null(niche_llty)
+ } else {
+ C_uint_big(niche_llty, niche_value)
+ };
+ OperandValue::Immediate(niche_llval).store(bcx, niche);
+ }
+ }
+ }
+ }
+
+ pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
+ -> PlaceRef<'tcx> {
+ PlaceRef {
+ llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]),
+ llextra: ptr::null_mut(),
+ layout: self.layout.field(bcx.ccx, 0),
+ alignment: self.alignment
+ }
+ }
+
+ pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
+ -> PlaceRef<'tcx> {
+ let mut downcast = *self;
+ downcast.layout = self.layout.for_variant(bcx.ccx, variant_index);
+
+ // Cast to the appropriate variant struct type.
+ let variant_ty = downcast.layout.llvm_type(bcx.ccx);
+ downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
+
+ downcast
+ }
+
+ pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
+ bcx.lifetime_start(self.llval, self.layout.size);
+ }
+
+ pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
+ bcx.lifetime_end(self.llval, self.layout.size);
+ }
+}
+
+impl<'a, 'tcx> MirContext<'a, 'tcx> {
+ pub fn trans_place(&mut self,
+ bcx: &Builder<'a, 'tcx>,
+ place: &mir::Place<'tcx>)
+ -> PlaceRef<'tcx> {
+ debug!("trans_place(place={:?})", place);
+
+ let ccx = bcx.ccx;
+ let tcx = ccx.tcx();
+
+ if let mir::Place::Local(index) = *place {
+ match self.locals[index] {
+ LocalRef::Place(place) => {
+ return place;
+ }
+ LocalRef::Operand(..) => {
+ bug!("using operand local {:?} as place", place);
+ }
+ }
+ }
+
+ let result = match *place {
+ mir::Place::Local(_) => bug!(), // handled above
+ mir::Place::Static(box mir::Static { def_id, ty }) => {
+ PlaceRef::new_sized(consts::get_static(ccx, def_id),
+ ccx.layout_of(self.monomorphize(&ty)),
+ Alignment::AbiAligned)
+ },
+ mir::Place::Projection(box mir::Projection {
+ ref base,
+ elem: mir::ProjectionElem::Deref
+ }) => {
+ // Load the pointer from its location.
+ self.trans_consume(bcx, base).deref(bcx.ccx)
+ }
+ mir::Place::Projection(ref projection) => {
+ let tr_base = self.trans_place(bcx, &projection.base);
+
+ match projection.elem {
+ mir::ProjectionElem::Deref => bug!(),
+ mir::ProjectionElem::Field(ref field, _) => {
+ tr_base.project_field(bcx, field.index())
+ }
+ mir::ProjectionElem::Index(index) => {
+ let index = &mir::Operand::Copy(mir::Place::Local(index));
+ let index = self.trans_operand(bcx, index);
+ let llindex = index.immediate();
+ tr_base.project_index(bcx, llindex)
+ }
+ mir::ProjectionElem::ConstantIndex { offset,
+ from_end: false,
+ min_length: _ } => {
+ let lloffset = C_usize(bcx.ccx, offset as u64);
+ tr_base.project_index(bcx, lloffset)
+ }
+ mir::ProjectionElem::ConstantIndex { offset,
+ from_end: true,
+ min_length: _ } => {
+ let lloffset = C_usize(bcx.ccx, offset as u64);
+ let lllen = tr_base.len(bcx.ccx);
+ let llindex = bcx.sub(lllen, lloffset);
+ tr_base.project_index(bcx, llindex)
+ }
+ mir::ProjectionElem::Subslice { from, to } => {
+ let mut subslice = tr_base.project_index(bcx,
+ C_usize(bcx.ccx, from as u64));
+ let projected_ty = PlaceTy::Ty { ty: tr_base.layout.ty }
+ .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
+ subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty));
+
+ if subslice.layout.is_unsized() {
+ assert!(tr_base.has_extra());
+ subslice.llextra = bcx.sub(tr_base.llextra,
+ C_usize(bcx.ccx, (from as u64) + (to as u64)));
+ }
+
+ // Cast the place pointer type to the new
+ // array or slice type (*[%_; new_len]).
+ subslice.llval = bcx.pointercast(subslice.llval,
+ subslice.layout.llvm_type(bcx.ccx).ptr_to());
+
+ subslice
+ }
+ mir::ProjectionElem::Downcast(_, v) => {
+ tr_base.project_downcast(bcx, v)
+ }
+ }
+ }
+ };
+ debug!("trans_place(place={:?}) => {:?}", place, result);
+ result
+ }
+
+ pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
+ let tcx = self.ccx.tcx();
+ let place_ty = place.ty(self.mir, tcx);
+ self.monomorphize(&place_ty.to_ty(tcx))
+ }
+}
+
use super::{MirContext, LocalRef};
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
-use super::lvalue::PlaceRef;
+use super::place::PlaceRef;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
})
}
- mir::Rvalue::Ref(_, bk, ref lvalue) => {
- let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
+ mir::Rvalue::Ref(_, bk, ref place) => {
+ let tr_place = self.trans_place(&bcx, place);
- let ty = tr_lvalue.layout.ty;
+ let ty = tr_place.layout.ty;
- // Note: lvalues are indirect, so storing the `llval` into the
+ // Note: places are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let val = if !bcx.ccx.shared().type_has_metadata(ty) {
- OperandValue::Immediate(tr_lvalue.llval)
+ OperandValue::Immediate(tr_place.llval)
} else {
- OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra)
+ OperandValue::Pair(tr_place.llval, tr_place.llextra)
};
(bcx, OperandRef {
val,
})
}
- mir::Rvalue::Len(ref lvalue) => {
- let size = self.evaluate_array_len(&bcx, lvalue);
+ mir::Rvalue::Len(ref place) => {
+ let size = self.evaluate_array_len(&bcx, place);
let operand = OperandRef {
val: OperandValue::Immediate(size),
layout: bcx.ccx.layout_of(bcx.tcx().types.usize),
})
}
- mir::Rvalue::Discriminant(ref lvalue) => {
+ mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
- let discr = self.trans_lvalue(&bcx, lvalue)
+ let discr = self.trans_place(&bcx, place)
.trans_get_discr(&bcx, discr_ty);
(bcx, OperandRef {
val: OperandValue::Immediate(discr),
fn evaluate_array_len(&mut self,
bcx: &Builder<'a, 'tcx>,
- lvalue: &mir::Place<'tcx>) -> ValueRef
+ place: &mir::Place<'tcx>) -> ValueRef
{
// ZST are passed as operands and require special handling
- // because trans_lvalue() panics if Local is operand.
- if let mir::Place::Local(index) = *lvalue {
+ // because trans_place() panics if Local is operand.
+ if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::TyArray(_, n) = op.layout.ty.sty {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
}
}
// use common size calculation for non zero-sized types
- let tr_value = self.trans_lvalue(&bcx, lvalue);
+ let tr_value = self.trans_place(&bcx, place);
return tr_value.len(bcx.ccx);
}
self.set_debug_loc(&bcx, statement.source_info);
match statement.kind {
- mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
- if let mir::Place::Local(index) = *lvalue {
+ mir::StatementKind::Assign(ref place, ref rvalue) => {
+ if let mir::Place::Local(index) = *place {
match self.locals[index] {
LocalRef::Place(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
} else {
- let tr_dest = self.trans_lvalue(&bcx, lvalue);
+ let tr_dest = self.trans_place(&bcx, place);
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
- mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
- self.trans_lvalue(&bcx, lvalue)
+ mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
+ self.trans_place(&bcx, place)
.trans_set_discr(&bcx, variant_index);
bcx
}
mir::StatementKind::StorageLive(local) => {
- if let LocalRef::Place(tr_lval) = self.locals[local] {
- tr_lval.storage_live(&bcx);
+ if let LocalRef::Place(tr_place) = self.locals[local] {
+ tr_place.storage_live(&bcx);
}
bcx
}
mir::StatementKind::StorageDead(local) => {
- if let LocalRef::Place(tr_lval) = self.locals[local] {
- tr_lval.storage_dead(&bcx);
+ if let LocalRef::Place(tr_place) = self.locals[local] {
+ tr_place.storage_dead(&bcx);
}
bcx
}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
- self.trans_lvalue(&bcx, output)
+ self.trans_place(&bcx, output)
}).collect();
let input_vals = inputs.iter().map(|input| {