Beginning of moving all backend-agnostic code to rustc_codegen_ssa
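
The pattern applied throughout this file: methods on PlaceRef and
FunctionCx that were hard-wired to LLVM's `&'ll Value` and
`Builder<'a, 'll, 'tcx>` become generic over `V: CodegenObject` and
`Bx: BuilderMethods<'a, 'tcx, Value = V>`, so the same MIR lowering can
drive any backend. A minimal, self-contained sketch of that pattern
(the `BuilderMethods`, `PlaceRef`, and `ToyBuilder` items below are
simplified stand-ins, not the rustc definitions):

    use std::fmt::Debug;

    /// Stand-in for the backend trait: the concrete value type becomes
    /// an associated type instead of a hard-coded `&'ll Value`.
    trait BuilderMethods {
        type Value: Copy + Debug;
        fn const_usize(&self, v: u64) -> Self::Value;
        fn inbounds_gep(&self, base: Self::Value, indices: &[Self::Value]) -> Self::Value;
    }

    #[derive(Copy, Clone, Debug)]
    struct PlaceRef<V> {
        llval: V,
    }

    impl<V: Copy + Debug> PlaceRef<V> {
        // As in the patch: generic over any builder whose Value type
        // matches V, rather than over one concrete LLVM builder.
        fn project_index<Bx: BuilderMethods<Value = V>>(&self, bx: &Bx, llindex: V) -> Self {
            let zero = bx.const_usize(0);
            PlaceRef { llval: bx.inbounds_gep(self.llval, &[zero, llindex]) }
        }
    }

    // A toy backend where "values" are plain integers.
    struct ToyBuilder;
    impl BuilderMethods for ToyBuilder {
        type Value = u64;
        fn const_usize(&self, v: u64) -> u64 { v }
        fn inbounds_gep(&self, base: u64, indices: &[u64]) -> u64 {
            base + indices.iter().sum::<u64>()
        }
    }

    fn main() {
        let bx = ToyBuilder;
        let place = PlaceRef { llval: 100u64 };
        let idx = bx.const_usize(3);
        println!("{:?}", place.project_index(&bx, idx)); // PlaceRef { llval: 103 }
    }
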
diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs
index b0740008995ed7b75841305d8c81d5fd34a83e17..8fa35d3aaf27423fba11f218649c6d7793c98a9e 100644
--- a/src/librustc_codegen_llvm/mir/place.rs
+++ b/src/librustc_codegen_llvm/mir/place.rs
@@ -8,24 +8,19 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use llvm::{self, LLVMConstInBoundsGEP};
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
+use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
 use rustc::mir;
 use rustc::mir::tcx::PlaceTy;
-use base;
-use builder::Builder;
-use common::{CodegenCx, IntPredicate};
-use consts;
+use builder::MemFlags;
+use rustc_codegen_ssa::common::IntPredicate;
 use type_of::LayoutLlvmExt;
-use value::Value;
 use glue;
-use mir::constant::const_alloc_to_llvm;
 
-use interfaces::{BuilderMethods, CommonMethods, TypeMethods};
+use interfaces::*;
 
 use super::{FunctionCx, LocalRef};
-use super::operand::{OperandRef, OperandValue};
+use super::operand::OperandValue;
 
 #[derive(Copy, Clone, Debug)]
 pub struct PlaceRef<'tcx, V> {
@@ -42,12 +37,12 @@ pub struct PlaceRef<'tcx, V> {
     pub align: Align,
 }
 
-impl PlaceRef<'tcx, &'ll Value> {
+impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     pub fn new_sized(
-        llval: &'ll Value,
+        llval: V,
         layout: TyLayout<'tcx>,
         align: Align,
-    ) -> PlaceRef<'tcx, &'ll Value> {
+    ) -> PlaceRef<'tcx, V> {
         assert!(!layout.is_unsized());
         PlaceRef {
             llval,
@@ -57,46 +52,34 @@ pub fn new_sized(
         }
     }
 
-    pub fn from_const_alloc(
-        bx: &Builder<'a, 'll, 'tcx>,
+    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &Bx,
         layout: TyLayout<'tcx>,
-        alloc: &mir::interpret::Allocation,
-        offset: Size,
-    ) -> PlaceRef<'tcx, &'ll Value> {
-        let init = const_alloc_to_llvm(bx.cx(), alloc);
-        let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);
-
-        let llval = unsafe { LLVMConstInBoundsGEP(
-            consts::bitcast(base_addr, bx.cx().i8p()),
-            &bx.cx().const_usize(offset.bytes()),
-            1,
-        )};
-        let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
-        PlaceRef::new_sized(llval, layout, alloc.align)
-    }
-
-    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-                  -> PlaceRef<'tcx, &'ll Value> {
+        name: &str
+    ) -> Self {
         debug!("alloca({:?}: {:?})", name, layout);
         assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
+        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
         Self::new_sized(tmp, layout, layout.align)
     }
 
     /// Returns a place for an indirect reference to an unsized place.
-    pub fn alloca_unsized_indirect(
-        bx: &Builder<'a, 'll, 'tcx>,
+    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &Bx,
         layout: TyLayout<'tcx>,
         name: &str,
-    ) -> PlaceRef<'tcx, &'ll Value> {
+    ) -> Self {
         debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
         assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
-        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
+        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
         let ptr_layout = bx.cx().layout_of(ptr_ty);
         Self::alloca(bx, ptr_layout, name)
     }
 
-    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
+    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
+        &self,
+        cx: &Cx
+    ) -> V {
         if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
             if self.layout.is_unsized() {
                 assert_eq!(count, 0);
@@ -109,75 +92,14 @@ pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
         }
     }
 
-    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
-        debug!("PlaceRef::load: {:?}", self);
-
-        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());
-
-        if self.layout.is_zst() {
-            return OperandRef::new_zst(bx.cx(), self.layout);
-        }
-
-        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
-            let vr = scalar.valid_range.clone();
-            match scalar.value {
-                layout::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx.cx());
-                    if range.start != range.end {
-                        bx.range_metadata(load, range);
-                    }
-                }
-                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
-                    bx.nonnull_metadata(load);
-                }
-                _ => {}
-            }
-        };
-
-        let val = if let Some(llextra) = self.llextra {
-            OperandValue::Ref(self.llval, Some(llextra), self.align)
-        } else if self.layout.is_llvm_immediate() {
-            let mut const_llval = None;
-            unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
-                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
-                        const_llval = llvm::LLVMGetInitializer(global);
-                    }
-                }
-            }
-            let llval = const_llval.unwrap_or_else(|| {
-                let load = bx.load(self.llval, self.align);
-                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
-                    scalar_load_metadata(load, scalar);
-                }
-                load
-            });
-            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
-        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-            let load = |i, scalar: &layout::Scalar| {
-                let llptr = bx.struct_gep(self.llval, i as u64);
-                let load = bx.load(llptr, self.align);
-                scalar_load_metadata(load, scalar);
-                if scalar.is_bool() {
-                    bx.trunc(load, bx.cx().i1())
-                } else {
-                    load
-                }
-            };
-            OperandValue::Pair(load(0, a), load(1, b))
-        } else {
-            OperandValue::Ref(self.llval, None, self.align)
-        };
-
-        OperandRef { val, layout: self.layout }
-    }
+}
 
+impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
     /// Access a field, at a point when the value's case is known.
-    pub fn project_field(
-        self,
-        bx: &Builder<'a, 'll, 'tcx>,
+    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        self, bx: &Bx,
         ix: usize,
-    ) -> PlaceRef<'tcx, &'ll Value> {
+    ) -> Self {
         let cx = bx.cx();
         let field = self.layout.field(cx, ix);
         let offset = self.layout.fields.offset(ix);
@@ -196,7 +118,7 @@ pub fn project_field(
             };
             PlaceRef {
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))),
+                llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
                 llextra: if cx.type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -265,15 +187,15 @@ pub fn project_field(
         debug!("struct_field_ptr: DST field offset: {:?}", offset);
 
         // Cast and adjust pointer
-        let byte_ptr = bx.pointercast(self.llval, cx.i8p());
+        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
         let byte_ptr = bx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
-        let ll_fty = field.llvm_type(cx);
+        let ll_fty = cx.backend_type(field);
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
 
         PlaceRef {
-            llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)),
+            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
             llextra: self.llextra,
             layout: field,
             align: effective_field_align,
@@ -281,12 +203,12 @@ pub fn project_field(
     }
 
     /// Obtain the actual discriminant of a value.
-    pub fn codegen_get_discr(
+    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &Bx,
         cast_to: Ty<'tcx>
-    ) -> &'ll Value {
-        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
+    ) -> V {
+        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
         if self.layout.abi.is_uninhabited() {
             return bx.cx().const_undef(cast_to);
         }
@@ -294,7 +216,7 @@ pub fn codegen_get_discr(
             layout::Variants::Single { index } => {
                 let discr_val = self.layout.ty.ty_adt_def().map_or(
                     index.as_u32() as u128,
-                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
+                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
                 return bx.cx().const_uint_big(cast_to, discr_val);
             }
             layout::Variants::Tagged { .. } |
@@ -302,7 +224,7 @@ pub fn codegen_get_discr(
         }
 
         let discr = self.project_field(bx, 0);
-        let lldiscr = discr.load(bx).immediate();
+        let lldiscr = bx.load_operand(discr).immediate();
         match self.layout.variants {
             layout::Variants::Single { .. } => bug!(),
             layout::Variants::Tagged { ref tag, .. } => {
@@ -322,7 +244,7 @@ pub fn codegen_get_discr(
                 niche_start,
                 ..
             } => {
-                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
+                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
                 if niche_variants.start() == niche_variants.end() {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_start == 0 {
@@ -350,7 +272,11 @@ pub fn codegen_get_discr(
 
     /// Set the discriminant for a new value of the given case of the given
     /// representation.
-    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
+    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &Bx,
+        variant_index: VariantIdx
+    ) {
         if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
             return;
         }
@@ -364,7 +290,7 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
                     .discriminant_for_variant(bx.tcx(), variant_index)
                     .val;
                 bx.store(
-                    bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
+                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                     ptr.llval,
                     ptr.align);
             }
@@ -375,20 +301,18 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
                 ..
             } => {
                 if variant_index != dataful_variant {
-                    if bx.sess().target.target.arch == "arm" ||
-                       bx.sess().target.target.arch == "aarch64" {
+                    if bx.cx().sess().target.target.arch == "arm" ||
+                       bx.cx().sess().target.target.arch == "aarch64" {
                         // Issue #34427: As workaround for LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
-                        let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
                         let fill_byte = bx.cx().const_u8(0);
                         let (size, align) = self.layout.size_and_align();
                         let size = bx.cx().const_usize(size.bytes());
-                        let align = bx.cx().const_u32(align.abi() as u32);
-                        base::call_memset(bx, llptr, fill_byte, size, align, false);
+                        bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
                     }
 
                     let niche = self.project_field(bx, 0);
-                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
+                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                     let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                     let niche_value = (niche_value as u128)
                         .wrapping_add(niche_start);
@@ -405,8 +329,11 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
         }
     }
 
-    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-                         -> PlaceRef<'tcx, &'ll Value> {
+    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &Bx,
+        llindex: V
+    ) -> Self {
         PlaceRef {
             llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
             llextra: None,
@@ -415,36 +342,40 @@ pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
         }
     }
 
-    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
-                            -> PlaceRef<'tcx, &'ll Value> {
+    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        &self,
+        bx: &Bx,
+        variant_index: VariantIdx
+    ) -> Self {
         let mut downcast = *self;
         downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
 
         // Cast to the appropriate variant struct type.
-        let variant_ty = downcast.layout.llvm_type(bx.cx());
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty));
+        let variant_ty = bx.cx().backend_type(downcast.layout);
+        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
 
         downcast
     }
 
-    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
+    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
 
-    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
+    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
         bx.lifetime_end(self.llval, self.layout.size);
     }
 }
 
-impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
-    pub fn codegen_place(&mut self,
-                        bx: &Builder<'a, 'll, 'tcx>,
-                        place: &mir::Place<'tcx>)
-                        -> PlaceRef<'tcx, &'ll Value> {
+impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_place(
+        &mut self,
+        bx: &Bx,
+        place: &mir::Place<'tcx>
+    ) -> PlaceRef<'tcx, Bx::Value> {
         debug!("codegen_place(place={:?})", place);
 
         let cx = bx.cx();
-        let tcx = cx.tcx;
+        let tcx = cx.tcx();
 
         if let mir::Place::Local(index) = *place {
             match self.locals[index] {
@@ -452,7 +383,7 @@ pub fn codegen_place(&mut self,
                     return place;
                 }
                 LocalRef::UnsizedPlace(place) => {
-                    return place.load(bx).deref(&cx);
+                    return bx.load_operand(place).deref(cx);
                 }
                 LocalRef::Operand(..) => {
                     bug!("using operand local {:?} as place", place);
@@ -472,7 +403,7 @@ pub fn codegen_place(&mut self,
                 match bx.tcx().const_eval(param_env.and(cid)) {
                     Ok(val) => match val.val {
                         mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
-                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
+                            bx.cx().from_const_alloc(layout, alloc, offset)
                         }
                         _ => bug!("promoteds should have an allocation: {:?}", val),
                     },
@@ -483,14 +414,16 @@ pub fn codegen_place(&mut self,
                         // so we generate an abort
                         let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                         bx.call(fnname, &[], None);
-                        let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+                        let llval = bx.cx().const_undef(
+                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
+                        );
                         PlaceRef::new_sized(llval, layout, layout.align)
                     }
                 }
             }
             mir::Place::Static(box mir::Static { def_id, ty }) => {
                 let layout = cx.layout_of(self.monomorphize(&ty));
-                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
+                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
             },
             mir::Place::Projection(box mir::Projection {
                 ref base,
@@ -531,8 +464,7 @@ pub fn codegen_place(&mut self,
                         let mut subslice = cg_base.project_index(bx,
                             bx.cx().const_usize(from as u64));
                         let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
-                            .projection_ty(tcx, &projection.elem)
-                            .to_ty(bx.tcx());
+                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
                         subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
 
                         if subslice.layout.is_unsized() {
@@ -543,7 +475,7 @@ pub fn codegen_place(&mut self,
                         // Cast the place pointer type to the new
                         // array or slice type (*[%_; new_len]).
                         subslice.llval = bx.pointercast(subslice.llval,
-                            bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx())));
+                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
 
                         subslice
                     }
@@ -558,7 +490,7 @@ pub fn codegen_place(&mut self,
     }
 
     pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
-        let tcx = self.cx.tcx;
+        let tcx = self.cx.tcx();
         let place_ty = place.ty(self.mir, tcx);
         self.monomorphize(&place_ty.to_ty(tcx))
     }
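
A note on the discriminant hunks above: for niche-encoded enums,
codegen_set_discr stores
(variant_index - niche_variants.start()).wrapping_add(niche_start),
and codegen_get_discr inverts it. A standalone sketch of just that
arithmetic (the function names and parameters here are illustrative,
not rustc's API):

    // Hypothetical model of the niche encode/decode arithmetic; the
    // real code also handles the dataful variant and the type casts.
    fn set_discr(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u128 {
        // Store (variant - start of the niche range) + niche_start, wrapping.
        ((variant_index - niche_variants_start) as u128).wrapping_add(niche_start)
    }

    fn get_discr(stored: u128, niche_variants_start: u32, niche_start: u128) -> u32 {
        // Invert: subtract niche_start, then re-add the range start.
        niche_variants_start + stored.wrapping_sub(niche_start) as u32
    }

    fn main() {
        // Two niche variants starting at index 1, niche values starting at 2.
        for v in 1..=2 {
            assert_eq!(get_discr(set_discr(v, 1, 2), 1, 2), v);
        }
        println!("round-trips OK");
    }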