1 use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
2 use crate::llvm::{self, False, BasicBlock};
3 use crate::common::Funclet;
4 use crate::context::CodegenCx;
5 use crate::type_::Type;
6 use crate::type_of::LayoutLlvmExt;
7 use crate::value::Value;
8 use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
9 use rustc_codegen_ssa::MemFlags;
10 use libc::{c_uint, c_char};
11 use rustc::ty::{self, Ty, TyCtxt};
12 use rustc::ty::layout::{self, Align, Size, TyLayout};
13 use rustc::hir::def_id::DefId;
14 use rustc::session::config;
15 use rustc_data_structures::small_c_str::SmallCStr;
16 use rustc_codegen_ssa::traits::*;
17 use rustc_codegen_ssa::base::to_immediate;
18 use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
19 use rustc_codegen_ssa::mir::place::PlaceRef;
22 use std::ops::{Deref, Range};
25 // All Builders must have an llfn associated with them
// Thin wrapper around a raw LLVM IRBuilder plus a borrow of the per-crate
// codegen context. NOTE(review): this listing elides intervening source lines;
// the struct's closing brace is not visible here.
27 pub struct Builder<'a, 'll: 'a, 'tcx: 'll> {
// Exclusively-borrowed LLVM builder handle; disposed in the Drop impl below.
28 pub llbuilder: &'ll mut llvm::Builder<'ll>,
// Shared codegen context (LLVM context/module, type and constant caches).
29 pub cx: &'a CodegenCx<'ll, 'tcx>,
// Release the underlying LLVM builder when the wrapper goes away.
32 impl Drop for Builder<'a, 'll, 'tcx> {
// Reborrow the &mut reference as a raw pointer for the FFI dispose call.
35 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
40 // This is a really awful way to get a zero-length c-string, but better (and a
41 // lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a static NUL byte, used as the empty instruction name
// passed to the LLVMBuild* functions below.
42 fn noname() -> *const c_char {
43 static CNULL: c_char = 0;
// The builder uses exactly the same backend type family as its CodegenCx;
// every associated type simply forwards to the context's choice.
47 impl BackendTypes for Builder<'_, 'll, 'tcx> {
48 type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
49 type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
50 type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
51 type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
53 type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
// Forward target data-layout queries to the codegen context.
56 impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
57 fn data_layout(&self) -> &ty::layout::TargetDataLayout {
// Forward type-context access to the codegen context.
62 impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
63 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
// Layout computation delegates to the context (body elided in this listing).
68 impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
70 type TyLayout = TyLayout<'tcx>;
72 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
// Auto-deref to the CodegenCx so `self.foo()` can reach context methods
// (type constructors, constants, session) without writing `self.cx` everywhere.
77 impl Deref for Builder<'_, 'll, 'tcx> {
78 type Target = CodegenCx<'ll, 'tcx>;
80 fn deref(&self) -> &Self::Target {
// Ties this builder to its concrete codegen-context type for the SSA traits.
85 impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
86 type CodegenCx = CodegenCx<'ll, 'tcx>;
// Main implementation of the backend-agnostic BuilderMethods trait.
// NOTE(review): many interior lines (unsafe blocks, closing braces) are elided
// from this listing.
89 impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// new_block: create a fresh builder, append a named basic block to `llfn`,
// and position the builder at its end.
91 cx: &'a CodegenCx<'ll, 'tcx>,
95 let mut bx = Builder::with_cx(cx);
97 let name = SmallCStr::new(name);
98 llvm::LLVMAppendBasicBlockInContext(
104 bx.position_at_end(llbb);
// Create a builder that is not yet positioned at any block.
108 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
109 // Create a fresh builder from the crate context.
110 let llbuilder = unsafe {
111 llvm::LLVMCreateBuilderInContext(cx.llcx)
// New block in the same function as the current insertion point.
119 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
120 Builder::new_block(self.cx, self.llfn(), name)
// The function containing the current insertion block.
123 fn llfn(&self) -> &'ll Value {
125 llvm::LLVMGetBasicBlockParent(self.llbb())
// The basic block the builder is currently inserting into.
129 fn llbb(&self) -> &'ll BasicBlock {
131 llvm::LLVMGetInsertBlock(self.llbuilder)
// Bump instruction-count statistics when -Z codegen-stats style options are
// enabled; `category` keys the per-kind histogram.
135 fn count_insn(&mut self, category: &str) {
136 if self.sess().codegen_stats() {
137 self.stats.borrow_mut().n_llvm_insns += 1;
139 if self.sess().count_llvm_insns() {
143 .entry(category.to_string())
// Give an LLVM value a human-readable IR name.
148 fn set_value_name(&mut self, value: &'ll Value, name: &str) {
149 let cname = SmallCStr::new(name);
151 llvm::LLVMSetValueName(value, cname.as_ptr());
// Move the insertion point to the end of `llbb`.
155 fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
157 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
// Move the insertion point to the start of `llbb` (used e.g. for allocas).
161 fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
163 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
// Terminator instructions: each counts itself, then emits the corresponding
// LLVM terminator at the current insertion point.
167 fn ret_void(&mut self) {
168 self.count_insn("retvoid");
170 llvm::LLVMBuildRetVoid(self.llbuilder);
174 fn ret(&mut self, v: &'ll Value) {
175 self.count_insn("ret");
177 llvm::LLVMBuildRet(self.llbuilder, v);
181 fn br(&mut self, dest: &'ll BasicBlock) {
182 self.count_insn("br");
184 llvm::LLVMBuildBr(self.llbuilder, dest);
// Conditional branch on `cond` (signature lines elided in this listing).
191 then_llbb: &'ll BasicBlock,
192 else_llbb: &'ll BasicBlock,
194 self.count_insn("condbr");
196 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
// Switch on `v`, defaulting to `else_llbb`; cases are added via add_case.
203 else_llbb: &'ll BasicBlock,
207 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
// invoke: call with EH edges — normal continuation `then`, unwind to `catch`;
// `funclet` supplies an operand bundle for MSVC-style EH when present.
215 then: &'ll BasicBlock,
216 catch: &'ll BasicBlock,
217 funclet: Option<&Funclet<'ll>>,
219 self.count_insn("invoke");
221 debug!("Invoke {:?} with args ({:?})",
// Arguments are bitcast as needed to match the callee's declared type.
225 let args = self.check_call("invoke", llfn, args);
226 let bundle = funclet.map(|funclet| funclet.bundle());
227 let bundle = bundle.as_ref().map(|b| &*b.raw);
230 llvm::LLVMRustBuildInvoke(self.llbuilder,
233 args.len() as c_uint,
241 fn unreachable(&mut self) {
242 self.count_insn("unreachable");
244 llvm::LLVMBuildUnreachable(self.llbuilder);
// Binary and unary arithmetic/bitwise builders. Each is a direct wrapper over
// the matching LLVMBuild* entry point with an unnamed result. The *_fast
// float variants additionally set the fast-math ("unsafe algebra") flag on the
// freshly built instruction via LLVMRustSetHasUnsafeAlgebra; note they reuse
// the plain op's count_insn category (e.g. fadd_fast counts as "fadd").
249 fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
250 self.count_insn("add");
252 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
256 fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
257 self.count_insn("fadd");
259 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
263 fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
264 self.count_insn("fadd");
266 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
267 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
272 fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
273 self.count_insn("sub");
275 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
279 fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
280 self.count_insn("fsub");
282 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
286 fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
287 self.count_insn("fsub");
289 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
290 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
295 fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
296 self.count_insn("mul");
298 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
302 fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
303 self.count_insn("fmul");
305 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
309 fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
310 self.count_insn("fmul");
312 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
313 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Division/remainder: unsigned, signed (exact variants assert no remainder
// at the LLVM level), and float forms.
319 fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
320 self.count_insn("udiv");
322 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
326 fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
327 self.count_insn("exactudiv");
329 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
333 fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
334 self.count_insn("sdiv");
336 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
340 fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
341 self.count_insn("exactsdiv");
343 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
347 fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
348 self.count_insn("fdiv");
350 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
354 fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
355 self.count_insn("fdiv");
357 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
358 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
363 fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
364 self.count_insn("urem");
366 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
370 fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
371 self.count_insn("srem");
373 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
377 fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
378 self.count_insn("frem");
380 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
384 fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
385 self.count_insn("frem");
387 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
388 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Shifts and bitwise logic.
393 fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
394 self.count_insn("shl");
396 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
400 fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
401 self.count_insn("lshr");
403 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
407 fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
408 self.count_insn("ashr");
410 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
414 fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
415 self.count_insn("and");
417 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
421 fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
422 self.count_insn("or");
424 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
428 fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
429 self.count_insn("xor");
431 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
// Unary negations and bitwise not.
435 fn neg(&mut self, v: &'ll Value) -> &'ll Value {
436 self.count_insn("neg");
438 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
442 fn fneg(&mut self, v: &'ll Value) -> &'ll Value {
443 self.count_insn("fneg");
445 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
449 fn not(&mut self, v: &'ll Value) -> &'ll Value {
450 self.count_insn("not");
452 llvm::LLVMBuildNot(self.llbuilder, v, noname())
// checked_binop: emit an overflow-checked add/sub/mul by selecting the
// appropriate llvm.{s,u}{add,sub,mul}.with.overflow.iN intrinsic for the
// operand type, then splitting the returned {result, overflow-bit} pair.
462 ) -> (Self::Value, Self::Value) {
463 use syntax::ast::IntTy::*;
464 use syntax::ast::UintTy::*;
465 use rustc::ty::{Int, Uint};
// Normalize isize/usize to the target's concrete pointer-width integer so a
// fixed-width intrinsic name can be chosen below.
467 let new_sty = match ty.sty {
468 Int(Isize) => Int(self.tcx.sess.target.isize_ty),
469 Uint(Usize) => Uint(self.tcx.sess.target.usize_ty),
470 ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
471 _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
474 let name = match oop {
475 OverflowOp::Add => match new_sty {
476 Int(I8) => "llvm.sadd.with.overflow.i8",
477 Int(I16) => "llvm.sadd.with.overflow.i16",
478 Int(I32) => "llvm.sadd.with.overflow.i32",
479 Int(I64) => "llvm.sadd.with.overflow.i64",
480 Int(I128) => "llvm.sadd.with.overflow.i128",
482 Uint(U8) => "llvm.uadd.with.overflow.i8",
483 Uint(U16) => "llvm.uadd.with.overflow.i16",
484 Uint(U32) => "llvm.uadd.with.overflow.i32",
485 Uint(U64) => "llvm.uadd.with.overflow.i64",
486 Uint(U128) => "llvm.uadd.with.overflow.i128",
490 OverflowOp::Sub => match new_sty {
491 Int(I8) => "llvm.ssub.with.overflow.i8",
492 Int(I16) => "llvm.ssub.with.overflow.i16",
493 Int(I32) => "llvm.ssub.with.overflow.i32",
494 Int(I64) => "llvm.ssub.with.overflow.i64",
495 Int(I128) => "llvm.ssub.with.overflow.i128",
497 Uint(U8) => "llvm.usub.with.overflow.i8",
498 Uint(U16) => "llvm.usub.with.overflow.i16",
499 Uint(U32) => "llvm.usub.with.overflow.i32",
500 Uint(U64) => "llvm.usub.with.overflow.i64",
501 Uint(U128) => "llvm.usub.with.overflow.i128",
505 OverflowOp::Mul => match new_sty {
506 Int(I8) => "llvm.smul.with.overflow.i8",
507 Int(I16) => "llvm.smul.with.overflow.i16",
508 Int(I32) => "llvm.smul.with.overflow.i32",
509 Int(I64) => "llvm.smul.with.overflow.i64",
510 Int(I128) => "llvm.smul.with.overflow.i128",
512 Uint(U8) => "llvm.umul.with.overflow.i8",
513 Uint(U16) => "llvm.umul.with.overflow.i16",
514 Uint(U32) => "llvm.umul.with.overflow.i32",
515 Uint(U64) => "llvm.umul.with.overflow.i64",
516 Uint(U128) => "llvm.umul.with.overflow.i128",
// Call the intrinsic and unpack: field 0 is the value, field 1 the i1
// overflow flag.
522 let intrinsic = self.get_intrinsic(&name);
523 let res = self.call(intrinsic, &[lhs, rhs], None);
525 self.extract_value(res, 0),
526 self.extract_value(res, 1),
// alloca: place the stack slot in the function's *entry* block (a second
// builder positioned at the first basic block does the insertion), which is
// what LLVM expects for static allocas.
530 fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
531 let mut bx = Builder::with_cx(self.cx);
532 bx.position_at_start(unsafe {
533 llvm::LLVMGetFirstBasicBlock(self.llfn())
535 bx.dynamic_alloca(ty, name, align)
// dynamic_alloca: alloca at the current insertion point; empty `name` uses
// the shared anonymous-name pointer.
538 fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
539 self.count_insn("alloca");
541 let alloca = if name.is_empty() {
542 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
544 let name = SmallCStr::new(name);
545 llvm::LLVMBuildAlloca(self.llbuilder, ty,
548 llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
// array_alloca: like dynamic_alloca but for `len` elements (runtime count).
553 fn array_alloca(&mut self,
557 align: Align) -> &'ll Value {
558 self.count_insn("alloca");
560 let alloca = if name.is_empty() {
561 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
563 let name = SmallCStr::new(name);
564 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
567 llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
// Plain load with an explicit alignment set on the instruction.
572 fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
573 self.count_insn("load");
575 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
576 llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
// Volatile load: same as load but flags the instruction volatile.
581 fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
582 self.count_insn("load.volatile");
584 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
585 llvm::LLVMSetVolatile(insn, llvm::True);
// Atomic load, translating the backend-agnostic ordering to LLVM's.
593 order: rustc_codegen_ssa::common::AtomicOrdering,
596 self.count_insn("load.atomic");
598 let load = llvm::LLVMRustBuildAtomicLoad(
602 AtomicOrdering::from_generic(order),
604 // LLVM requires the alignment of atomic loads to be at least the size of the type.
605 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
// load_operand: materialize a PlaceRef (an lvalue) as an OperandRef,
// dispatching on the place's layout: ZST / unsized / immediate scalar /
// scalar pair / by-ref aggregate. NOTE(review): several lines are elided here.
612 place: PlaceRef<'tcx, &'ll Value>
613 ) -> OperandRef<'tcx, &'ll Value> {
614 debug!("PlaceRef::load: {:?}", place);
616 assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
// Zero-sized types need no load at all.
618 if place.layout.is_zst() {
619 return OperandRef::new_zst(self.cx(), place.layout);
// Helper: attach !range / !nonnull metadata to a scalar load based on the
// scalar's valid-range info from layout.
622 fn scalar_load_metadata<'a, 'll, 'tcx>(
623 bx: &mut Builder<'a, 'll, 'tcx>,
625 scalar: &layout::Scalar
627 let vr = scalar.valid_range.clone();
630 let range = scalar.valid_range_exclusive(bx);
631 if range.start != range.end {
632 bx.range_metadata(load, range);
// Pointers with a valid range excluding 0 get !nonnull instead.
635 layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
636 bx.nonnull_metadata(load);
// Unsized places stay by-ref, carrying their extra (len/vtable) value.
642 let val = if let Some(llextra) = place.llextra {
643 OperandValue::Ref(place.llval, Some(llextra), place.align)
644 } else if place.layout.is_llvm_immediate() {
645 let mut const_llval = None;
// If the place is a constant global, fold the load to its initializer.
647 if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
648 if llvm::LLVMIsGlobalConstant(global) == llvm::True {
649 const_llval = llvm::LLVMGetInitializer(global);
653 let llval = const_llval.unwrap_or_else(|| {
654 let load = self.load(place.llval, place.align);
655 if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
656 scalar_load_metadata(self, load, scalar);
660 OperandValue::Immediate(to_immediate(self, llval, place.layout))
// Scalar pairs load each half separately; the second half's alignment is
// restricted by its offset within the pair.
661 } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
662 let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
664 let mut load = |i, scalar: &layout::Scalar, align| {
665 let llptr = self.struct_gep(place.llval, i as u64);
666 let load = self.load(llptr, align);
667 scalar_load_metadata(self, load, scalar);
// i8-stored bools are truncated back to i1 for the immediate form.
668 if scalar.is_bool() {
669 self.trunc(load, self.type_i1())
676 load(0, a, place.align),
677 load(1, b, place.align.restrict_for_offset(b_offset)),
// Everything else is returned by reference.
680 OperandValue::Ref(place.llval, None, place.align)
683 OperandRef { val, layout: place.layout }
// Attach !range metadata [start, end) to a load, except on amdgpu where it
// trips an LLVM assertion (see comment below).
688 fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
689 if self.sess().target.target.arch == "amdgpu" {
690 // amdgpu/LLVM does something weird and thinks a i64 value is
691 // split into a v2i32, halving the bitwidth LLVM expects,
692 // tripping an assertion. So, for now, just disable this
698 let llty = self.cx.val_ty(load);
700 self.cx.const_uint_big(llty, range.start),
701 self.cx.const_uint_big(llty, range.end)
704 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
705 llvm::LLVMMDNodeInContext(self.cx.llcx,
// Attach !nonnull metadata (empty MD node) to a pointer load.
711 fn nonnull_metadata(&mut self, load: &'ll Value) {
713 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
714 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// Plain store: delegates to store_with_flags with no flags.
718 fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
719 self.store_with_flags(val, ptr, align, MemFlags::empty())
// store_with_flags: bitcasts `ptr` if needed (check_store), then applies
// UNALIGNED / VOLATILE / NONTEMPORAL flags to the emitted store.
729 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
730 self.count_insn("store");
731 let ptr = self.check_store(val, ptr);
733 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
// UNALIGNED maps to alignment 1; otherwise use the requested alignment.
734 let align = if flags.contains(MemFlags::UNALIGNED) {
737 align.bytes() as c_uint
739 llvm::LLVMSetAlignment(store, align);
740 if flags.contains(MemFlags::VOLATILE) {
741 llvm::LLVMSetVolatile(store, llvm::True);
743 if flags.contains(MemFlags::NONTEMPORAL) {
744 // According to LLVM [1] building a nontemporal store must
745 // *always* point to a metadata value of the integer 1.
747 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
748 let one = self.cx.const_i32(1);
749 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
750 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
// Atomic store with the given ordering; alignment is forced to the type size
// as LLVM requires for atomics.
756 fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value,
757 order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
758 debug!("Store {:?} -> {:?}", val, ptr);
759 self.count_insn("store.atomic");
760 let ptr = self.check_store(val, ptr);
762 let store = llvm::LLVMRustBuildAtomicStore(
766 AtomicOrdering::from_generic(order),
768 // LLVM requires the alignment of atomic stores to be at least the size of the type.
769 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
// getelementptr over an index list.
773 fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
774 self.count_insn("gep");
776 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
777 indices.len() as c_uint, noname())
// inbounds GEP — same, but with the `inbounds` keyword (UB on overflow).
781 fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
782 self.count_insn("inboundsgep");
784 llvm::LLVMBuildInBoundsGEP(
785 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
// Cast instructions: each wraps the matching LLVMBuild* cast with an unnamed
// result. intcast chooses sext vs zext based on `is_signed`.
790 fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
791 self.count_insn("trunc");
793 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
797 fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
798 self.count_insn("sext");
800 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
804 fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
805 self.count_insn("fptoui");
807 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
811 fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
812 self.count_insn("fptosi");
814 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
818 fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
819 self.count_insn("uitofp");
821 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
825 fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
826 self.count_insn("sitofp");
828 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
832 fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
833 self.count_insn("fptrunc");
835 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
839 fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
840 self.count_insn("fpext");
842 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
846 fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
847 self.count_insn("ptrtoint");
849 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
853 fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
854 self.count_insn("inttoptr");
856 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
860 fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
861 self.count_insn("bitcast");
863 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
868 fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
869 self.count_insn("intcast");
871 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
875 fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
876 self.count_insn("pointercast");
878 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
// Integer / float comparisons; the generic predicate enums are converted to
// LLVM's numbering before the FFI call.
883 fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
884 self.count_insn("icmp");
885 let op = llvm::IntPredicate::from_generic(op);
887 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
891 fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
892 self.count_insn("fcmp");
894 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
898 /* Miscellaneous instructions */
// Phi node with no incoming edges yet; edges are added later.
899 fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value {
900 self.count_insn("emptyphi");
902 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
// Phi node with paired (value, predecessor-block) incoming lists.
906 fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
907 assert_eq!(vals.len(), bbs.len());
908 let phi = self.empty_phi(ty);
909 self.count_insn("addincoming");
911 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
913 vals.len() as c_uint);
// inline_asm_call: build an inline-asm value from template + constraint
// strings and call it with `inputs`. Returns None when LLVM rejects the
// constraint string (verification below). NOTE(review): the tail of this
// function is elided in this listing.
918 fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr,
919 inputs: &[&'ll Value], output: &'ll Type,
920 volatile: bool, alignstack: bool,
921 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
922 self.count_insn("inlineasm");
924 let volatile = if volatile { llvm::True }
925 else { llvm::False };
926 let alignstack = if alignstack { llvm::True }
927 else { llvm::False };
// The asm "function" type is derived from the input value types + output.
929 let argtys = inputs.iter().map(|v| {
930 debug!("Asm Input Type: {:?}", *v);
932 }).collect::<Vec<_>>();
934 debug!("Asm Output Type: {:?}", output);
935 let fty = self.type_func(&argtys[..], output);
937 // Ask LLVM to verify that the constraints are well-formed.
938 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
939 debug!("Constraint verification result: {:?}", constraints_ok);
941 let v = llvm::LLVMRustInlineAsm(
947 AsmDialect::from_generic(dia),
949 Some(self.call(v, inputs, None))
951 // LLVM has detected an issue with our constraints, bail out
// memcpy: NONTEMPORAL falls back to a load + nontemporal store (no
// nontemporal memcpy exists); otherwise cast both pointers to i8* and emit
// the LLVM memcpy with per-pointer alignments.
957 fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
958 src: &'ll Value, src_align: Align,
959 size: &'ll Value, flags: MemFlags) {
960 if flags.contains(MemFlags::NONTEMPORAL) {
961 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
962 let val = self.load(src, src_align);
963 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
964 self.store_with_flags(val, ptr, dst_align, flags);
967 let size = self.intcast(size, self.type_isize(), false);
968 let is_volatile = flags.contains(MemFlags::VOLATILE);
969 let dst = self.pointercast(dst, self.type_i8p());
970 let src = self.pointercast(src, self.type_i8p());
972 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
973 src, src_align.bytes() as c_uint, size, is_volatile);
// memmove: identical structure to memcpy, but overlapping ranges allowed.
977 fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
978 src: &'ll Value, src_align: Align,
979 size: &'ll Value, flags: MemFlags) {
980 if flags.contains(MemFlags::NONTEMPORAL) {
981 // HACK(nox): This is inefficient but there is no nontemporal memmove.
982 let val = self.load(src, src_align);
983 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
984 self.store_with_flags(val, ptr, dst_align, flags);
987 let size = self.intcast(size, self.type_isize(), false);
988 let is_volatile = flags.contains(MemFlags::VOLATILE);
989 let dst = self.pointercast(dst, self.type_i8p());
990 let src = self.pointercast(src, self.type_i8p());
992 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
993 src, src_align.bytes() as c_uint, size, is_volatile);
// memset: resolve the pointer-width-specific llvm.memset intrinsic by name
// and call it with (ptr, byte, size, align, volatile).
1000 fill_byte: &'ll Value,
1005 let ptr_width = &self.sess().target.target.target_pointer_width;
1006 let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
1007 let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
1008 let ptr = self.pointercast(ptr, self.type_i8p());
1009 let align = self.const_u32(align.bytes() as u32);
1010 let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
1011 self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
// Float min/max following IEEE minnum/maxnum semantics via the Rust FFI
// helpers.
1014 fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
1015 self.count_insn("minnum");
1016 unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
1018 fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
1019 self.count_insn("maxnum");
1020 unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
// select cond ? then_val : else_val.
1024 &mut self, cond: &'ll Value,
1025 then_val: &'ll Value,
1026 else_val: &'ll Value,
1028 self.count_insn("select");
1030 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
// C-style va_arg extraction from a va_list value.
1035 fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
1036 self.count_insn("vaarg");
1038 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
// Vector lane extraction/insertion and shuffles.
1042 fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
1043 self.count_insn("extractelement");
1045 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
1050 &mut self, vec: &'ll Value,
1054 self.count_insn("insertelement");
1056 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
1060 fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
1061 self.count_insn("shufflevector");
1063 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
// Splat: insert `elt` into lane 0 of an undef vector, then shuffle with an
// all-zero mask to broadcast it to every lane.
1067 fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
1069 let elt_ty = self.cx.val_ty(elt);
1070 let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
1071 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
1072 let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
1073 self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
// Horizontal vector reductions. The float fadd/fmul forms are fast-math only
// (flag set via LLVMRustSetHasUnsafeAlgebra) pending the upstream LLVM bug
// referenced below; fmin/fmax have both NaN-propagating and fast (NoNaNs)
// variants.
1077 fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
1078 self.count_insn("vector.reduce.fadd_fast");
1080 // FIXME: add a non-fast math version once
1081 // https://bugs.llvm.org/show_bug.cgi?id=36732
1083 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
1084 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1088 fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
1089 self.count_insn("vector.reduce.fmul_fast");
1091 // FIXME: add a non-fast math version once
1092 // https://bugs.llvm.org/show_bug.cgi?id=36732
1094 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
1095 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Integer reductions: add / mul / and / or / xor.
1099 fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
1100 self.count_insn("vector.reduce.add");
1101 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
1103 fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
1104 self.count_insn("vector.reduce.mul");
1105 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
1107 fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
1108 self.count_insn("vector.reduce.and");
1109 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
1111 fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
1112 self.count_insn("vector.reduce.or");
1113 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
1115 fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
1116 self.count_insn("vector.reduce.xor");
1117 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
// Float min/max reductions: plain forms keep NaN semantics (NoNaNs: false).
1119 fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
1120 self.count_insn("vector.reduce.fmin");
1121 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
1123 fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
1124 self.count_insn("vector.reduce.fmax");
1125 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
1127 fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
1128 self.count_insn("vector.reduce.fmin_fast");
1130 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
1131 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1135 fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
1136 self.count_insn("vector.reduce.fmax_fast");
1138 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
1139 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Integer min/max reductions with explicit signedness.
1143 fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
1144 self.count_insn("vector.reduce.min");
1145 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
1147 fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
1148 self.count_insn("vector.reduce.max");
1149 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
// Aggregate field access; the asserts guard the u64 -> c_uint narrowing.
1152 fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
1153 self.count_insn("extractvalue");
1154 assert_eq!(idx as c_uint as u64, idx);
1156 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
1160 fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value,
1161 idx: u64) -> &'ll Value {
1162 self.count_insn("insertvalue");
1163 assert_eq!(idx as c_uint as u64, idx);
1165 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
// Landing pad for DWARF-style exception handling, given the personality
// function and a clause-count hint.
1170 fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value,
1171 num_clauses: usize) -> &'ll Value {
1172 self.count_insn("landingpad");
1174 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
1175 num_clauses as c_uint, noname())
// Exception-handling helpers. The cleanup/catch pad builders return
// Funclets and `.expect(...)` when the LLVM build lacks funclet support.
1179 fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
1181 llvm::LLVMAddClause(landing_pad, clause);
1185 fn set_cleanup(&mut self, landing_pad: &'ll Value) {
1186 self.count_insn("setcleanup");
1188 llvm::LLVMSetCleanup(landing_pad, llvm::True);
// Resume unwinding with the given exception value.
1192 fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
1193 self.count_insn("resume");
1195 llvm::LLVMBuildResume(self.llbuilder, exn)
// MSVC-style cleanuppad, optionally nested under `parent`.
1199 fn cleanup_pad(&mut self,
1200 parent: Option<&'ll Value>,
1201 args: &[&'ll Value]) -> Funclet<'ll> {
1202 self.count_insn("cleanuppad");
1203 let name = const_cstr!("cleanuppad");
1205 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
1207 args.len() as c_uint,
1211 Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
// cleanupret out of a funclet, optionally continuing unwinding at `unwind`.
1215 &mut self, funclet: &Funclet<'ll>,
1216 unwind: Option<&'ll BasicBlock>,
1218 self.count_insn("cleanupret");
1220 llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
1222 ret.expect("LLVM does not have support for cleanupret")
// catchpad attached to a catchswitch parent.
1225 fn catch_pad(&mut self,
1227 args: &[&'ll Value]) -> Funclet<'ll> {
1228 self.count_insn("catchpad");
1229 let name = const_cstr!("catchpad");
1231 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1232 args.len() as c_uint, args.as_ptr(),
1235 Funclet::new(ret.expect("LLVM does not have support for catchpad"))
1238 fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
1239 self.count_insn("catchret");
1241 llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
1243 ret.expect("LLVM does not have support for catchret")
// catchswitch with room for `num_handlers`; handlers added via add_handler.
1248 parent: Option<&'ll Value>,
1249 unwind: Option<&'ll BasicBlock>,
1250 num_handlers: usize,
1252 self.count_insn("catchswitch");
1253 let name = const_cstr!("catchswitch");
1255 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1256 num_handlers as c_uint,
1259 ret.expect("LLVM does not have support for catchswitch")
1262 fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1264 llvm::LLVMRustAddHandler(catch_switch, handler);
// Set the personality function on the current function.
1268 fn set_personality_fn(&mut self, personality: &'ll Value) {
1270 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1274 // Atomic Operations
// Fragment of `atomic_cmpxchg`: translates the backend-independent orderings
// (and the `weak` flag) into LLVM equivalents and emits a `cmpxchg` through
// `LLVMRustBuildAtomicCmpXchg`.
// NOTE(review): the header (`fn atomic_cmpxchg`, dst/cmp/src parameters) and
// the dst/cmp/src/weak arguments of the FFI call are elided in this excerpt.
1280 order: rustc_codegen_ssa::common::AtomicOrdering,
1281 failure_order: rustc_codegen_ssa::common::AtomicOrdering,
1284 let weak = if weak { llvm::True } else { llvm::False };
1286 llvm::LLVMRustBuildAtomicCmpXchg(
1291 AtomicOrdering::from_generic(order),
1292 AtomicOrdering::from_generic(failure_order),
// Fragment of `atomic_rmw`: converts the generic RMW opcode and ordering to
// their LLVM forms and emits the operation via `LLVMBuildAtomicRMW`.
// NOTE(review): the `fn atomic_rmw` header and the dst/src/singlethread
// arguments are elided in this excerpt.
1299 op: rustc_codegen_ssa::common::AtomicRmwBinOp,
1302 order: rustc_codegen_ssa::common::AtomicOrdering,
1305 llvm::LLVMBuildAtomicRMW(
1307 AtomicRmwBinOp::from_generic(op),
1310 AtomicOrdering::from_generic(order),
// Fragment of `atomic_fence`: lowers the generic ordering and synchronization
// scope to LLVM equivalents and emits a fence via `LLVMRustBuildAtomicFence`.
// NOTE(review): the `fn atomic_fence` header and the `unsafe` wrapper are
// elided in this excerpt.
1317 order: rustc_codegen_ssa::common::AtomicOrdering,
1318 scope: rustc_codegen_ssa::common::SynchronizationScope
1321 llvm::LLVMRustBuildAtomicFence(
1323 AtomicOrdering::from_generic(order),
1324 SynchronizationScope::from_generic(scope)
// Adds a (`on_val` -> `dest`) case to an existing `switch` instruction `s`.
// NOTE(review): `unsafe { ... }` wrapper lines are elided in this excerpt.
1329 fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1331 llvm::LLVMAddCase(s, on_val, dest)
// Appends a single (value, predecessor-block) incoming edge to a `phi` node;
// the `&val`/`&bb` references with count 1 pass one-element arrays to LLVM.
// NOTE(review): `unsafe { ... }` wrapper lines are elided in this excerpt.
1335 fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1336 self.count_insn("addincoming");
1338 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
// Attaches empty `!invariant.load` metadata to `load`, marking the loaded
// memory as never-changing so LLVM may fold/hoist the load.
// NOTE(review): `unsafe { ... }` wrapper lines are elided in this excerpt.
1342 fn set_invariant_load(&mut self, load: &'ll Value) {
1344 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1345 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// Sanity-checks a store destination: asserts `ptr` really is a pointer, and
// if its type does not match a pointer-to-`val`'s-type, bitcasts `ptr` to the
// expected pointer type (logging the mismatch) instead of failing.
// NOTE(review): the `val` parameter line, the matching-types `ptr` return,
// and the `} else {` framing are elided in this excerpt.
1349 fn check_store<'b>(&mut self,
1351 ptr: &'ll Value) -> &'ll Value {
1352 let dest_ptr_ty = self.cx.val_ty(ptr);
1353 let stored_ty = self.cx.val_ty(val);
1354 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
1356 assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
1358 if dest_ptr_ty == stored_ptr_ty {
1361 debug!("Type mismatch in store. \
1362 Expected {:?}, got {:?}; inserting bitcast",
1363 dest_ptr_ty, stored_ptr_ty);
1364 self.bitcast(ptr, stored_ptr_ty)
// Validates a call's arguments against the callee's LLVM function type:
// strips pointer wrappers off `llfn`'s type, asserts it is a function, and
// compares each parameter type with the corresponding argument's type. When
// everything matches, the original slice is returned borrowed (no copy);
// otherwise each mismatched argument is bitcast to the expected type and an
// owned, casted argument vector is returned.
// NOTE(review): several interior lines (the `llfn` parameter, the
// `if all_args_match` framing, `.zip(...).enumerate()`, and the `else`
// branch returning the unchanged `actual_val`) are elided in this excerpt.
1368 fn check_call<'b>(&mut self,
1371 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1372 let mut fn_ty = self.cx.val_ty(llfn);
1373 // Strip off pointers
1374 while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
1375 fn_ty = self.cx.element_type(fn_ty);
1378 assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
1379 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1381 let param_tys = self.cx.func_params_types(fn_ty);
1383 let all_args_match = param_tys.iter()
1384 .zip(args.iter().map(|&v| self.val_ty(v)))
1385 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
1388 return Cow::Borrowed(args);
1391 let casted_args: Vec<_> = param_tys.into_iter()
1394 .map(|(i, (expected_ty, &actual_val))| {
1395 let actual_ty = self.val_ty(actual_val);
1396 if expected_ty != actual_ty {
1397 debug!("Type mismatch in function call of {:?}. \
1398 Expected {:?} for param {}, got {:?}; injecting bitcast",
1399 llfn, expected_ty, i, actual_ty);
1400 self.bitcast(actual_val, expected_ty)
1407 Cow::Owned(casted_args)
// Emits an `llvm.lifetime.start` marker for `size` bytes at `ptr` via the
// shared `call_lifetime_intrinsic` helper (a no-op at `OptLevel::No`, per
// the helper's guard). NOTE(review): closing brace elided in this excerpt.
1410 fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
1411 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
// Emits an `llvm.lifetime.end` marker for `size` bytes at `ptr`, mirroring
// `lifetime_start`. NOTE(review): closing brace elided in this excerpt.
1414 fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
1415 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
// Fragment of `call`: type-checks/bitcasts `args` via `check_call`, converts
// the optional `funclet` into an LLVM operand bundle, and emits the call
// through `LLVMRustBuildCall`.
// NOTE(review): the `fn call(&mut self, llfn: ...)` header, the `unsafe`
// wrapper, and the trailing `llfn`/bundle/name arguments are elided here.
1421 args: &[&'ll Value],
1422 funclet: Option<&Funclet<'ll>>,
1424 self.count_insn("call");
1426 debug!("Call {:?} with args ({:?})",
1430 let args = self.check_call("call", llfn, args);
1431 let bundle = funclet.map(|funclet| funclet.bundle());
1432 let bundle = bundle.as_ref().map(|b| &*b.raw);
1435 llvm::LLVMRustBuildCall(
1438 args.as_ptr() as *const &llvm::Value,
1439 args.len() as c_uint,
// Zero-extends `val` to the wider integer type `dest_ty` via `LLVMBuildZExt`
// (unnamed result, per `noname()`).
// NOTE(review): `unsafe { ... }` wrapper lines are elided in this excerpt.
1445 fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
1446 self.count_insn("zext");
1448 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
// Emits a struct-field GEP for field index `idx`; the `assert_eq!` guards
// the lossy `u64 -> c_uint` narrowing before calling `LLVMBuildStructGEP`.
// NOTE(review): `unsafe { ... }` wrapper lines are elided in this excerpt.
1452 fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1453 self.count_insn("structgep");
1454 assert_eq!(idx as c_uint as u64, idx);
1456 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1460 fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
// Deletes `bb` from its containing function. `unsafe fn`: the caller must
// ensure no remaining references to the block exist (contract inherited from
// `LLVMDeleteBasicBlock`). NOTE(review): closing brace elided here.
1464 unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
1465 llvm::LLVMDeleteBasicBlock(bb);
// Applies the `noinline` attribute to the call-site value `llret` so LLVM
// will not inline the callee at this site.
1468 fn do_not_inline(&mut self, llret: &'ll Value) {
1469 llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
// Thin forwarding impl: resolving a static's LLVM value is context-level
// work, so the builder simply delegates to `CodegenCx::get_static`.
1473 impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
1474 fn get_static(&self, def_id: DefId) -> &'ll Value {
1475 self.cx().get_static(def_id)
1479 impl Builder<'a, 'll, 'tcx> {
1480 fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
1481 if self.cx.sess().opts.optimize == config::OptLevel::No {
1485 let size = size.bytes();
1490 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
1492 let ptr = self.pointercast(ptr, self.cx.type_i8p());
1493 self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);