1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{self, False, BasicBlock};
13 use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
14 use rustc_codegen_ssa;
16 use context::CodegenCx;
18 use type_of::LayoutLlvmExt;
20 use libc::{c_uint, c_char};
21 use rustc::ty::{self, Ty, TyCtxt};
22 use rustc::ty::layout::{self, Align, Size, TyLayout};
23 use rustc::session::config;
24 use rustc_data_structures::small_c_str::SmallCStr;
28 use mir::operand::{OperandValue, OperandRef};
29 use mir::place::PlaceRef;
34 // All Builders must have an llfn associated with them
// A thin wrapper around an LLVM IRBuilder plus the crate codegen context.
// NOTE(review): this view is elided — the struct's closing brace (and any
// further fields) are not visible here.
36 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
// Owned raw LLVM builder handle; disposed in the Drop impl below.
37 pub llbuilder: &'ll mut llvm::Builder<'ll>,
// Back-reference to the per-codegen-unit context (types, constants, session).
38 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
// Release the underlying LLVM builder when this wrapper goes away; the
// handle is owned by us, so failing to dispose it would leak.
41 impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
44 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
49 // This is a really awful way to get a zero-length c-string, but better (and a
50 // lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a static NUL byte, used as the "no name" argument for
// the many LLVMBuild* calls below.
51 fn noname() -> *const c_char {
52 static CNULL: c_char = 0;
// Flags controlling how loads/stores and the mem* intrinsics are emitted.
// NOTE(review): the enclosing `bitflags!` macro invocation line is elided
// from this view; only the struct body is visible.
57 pub struct MemFlags: u8 {
58 const VOLATILE = 1 << 0;
59 const NONTEMPORAL = 1 << 1;
60 const UNALIGNED = 1 << 2;
// The builder reuses the codegen context's associated backend types verbatim,
// so values/blocks/types created by either are interchangeable.
64 impl BackendTypes for Builder<'_, 'll, 'tcx> {
65 type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
66 type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
67 type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
68 type Context = <CodegenCx<'ll, 'tcx> as BackendTypes>::Context;
69 type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
71 type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
// Layout-related trait impls: all of them delegate to the codegen context so
// that a Builder can be used wherever layout queries are needed.
// NOTE(review): the delegating method bodies are elided in this view.
74 impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
75 fn data_layout(&self) -> &ty::layout::TargetDataLayout {
80 impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
81 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
86 impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
88 type TyLayout = TyLayout<'tcx>;
90 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
96 impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
97 type CodegenCx = CodegenCx<'ll, 'tcx>;
// Main implementation of the backend-agnostic BuilderMethods interface.
// Each method is a thin FFI shim over the corresponding LLVMBuild* entry
// point; count_insn feeds the -Z codegen-stats / count-llvm-insns counters.
100 impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// Create a new basic block on `llfn` and return a builder positioned at
// its end. NOTE(review): the surrounding signature lines are elided.
102 cx: &'a CodegenCx<'ll, 'tcx>,
106 let bx = Builder::with_cx(cx);
108 let name = SmallCStr::new(name);
109 llvm::LLVMAppendBasicBlockInContext(
115 bx.position_at_end(llbb);
119 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
120 // Create a fresh builder from the crate context.
121 let llbuilder = unsafe {
122 llvm::LLVMCreateBuilderInContext(cx.llcx)
// New block in the same function as this builder's insertion point.
130 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
131 Builder::new_block(self.cx, self.llfn(), name)
// The function containing the current insertion block.
134 fn llfn(&self) -> &'ll Value {
136 llvm::LLVMGetBasicBlockParent(self.llbb())
// The basic block the builder is currently inserting into.
140 fn llbb(&self) -> &'ll BasicBlock {
142 llvm::LLVMGetInsertBlock(self.llbuilder)
// Bump instruction counters when the relevant -Z flags are enabled;
// otherwise this is a cheap pair of branch checks.
146 fn count_insn(&self, category: &str) {
147 if self.cx().sess().codegen_stats() {
148 self.cx().stats.borrow_mut().n_llvm_insns += 1;
150 if self.cx().sess().count_llvm_insns() {
154 .entry(category.to_string())
159 fn set_value_name(&self, value: &'ll Value, name: &str) {
160 let cname = SmallCStr::new(name);
162 llvm::LLVMSetValueName(value, cname.as_ptr());
166 fn position_at_end(&self, llbb: &'ll BasicBlock) {
168 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
172 fn position_at_start(&self, llbb: &'ll BasicBlock) {
174 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
// Terminators: ret void / ret / unconditional and conditional branches.
179 self.count_insn("retvoid");
181 llvm::LLVMBuildRetVoid(self.llbuilder);
185 fn ret(&self, v: &'ll Value) {
186 self.count_insn("ret");
188 llvm::LLVMBuildRet(self.llbuilder, v);
192 fn br(&self, dest: &'ll BasicBlock) {
193 self.count_insn("br");
195 llvm::LLVMBuildBr(self.llbuilder, dest);
202 then_llbb: &'ll BasicBlock,
203 else_llbb: &'ll BasicBlock,
205 self.count_insn("condbr");
207 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
// Switch: cases are added afterwards via add_case.
214 else_llbb: &'ll BasicBlock,
218 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
// Invoke: a call with unwind edge; arguments are type-repaired first via
// check_call, and the funclet operand bundle is attached when present.
226 then: &'ll BasicBlock,
227 catch: &'ll BasicBlock,
228 funclet: Option<&Funclet<'ll>>,
230 self.count_insn("invoke");
232 debug!("Invoke {:?} with args ({:?})",
236 let args = self.check_call("invoke", llfn, args);
237 let bundle = funclet.map(|funclet| funclet.bundle());
238 let bundle = bundle.as_ref().map(|b| &*b.raw);
241 llvm::LLVMRustBuildInvoke(self.llbuilder,
244 args.len() as c_uint,
252 fn unreachable(&self) {
253 self.count_insn("unreachable");
255 llvm::LLVMBuildUnreachable(self.llbuilder);
// Arithmetic, bitwise and unary instruction builders. All are direct
// LLVMBuild* shims. The *_fast float variants additionally mark the result
// with "unsafe algebra" (LLVM fast-math flags), which licenses reassociation
// and similar value-changing optimizations.
260 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
261 self.count_insn("add");
263 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
267 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
268 self.count_insn("fadd");
270 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
274 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
275 self.count_insn("fadd");
277 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
278 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
283 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
284 self.count_insn("sub");
286 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
290 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
291 self.count_insn("fsub");
293 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
297 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
298 self.count_insn("fsub");
300 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
301 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
306 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
307 self.count_insn("mul");
309 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
313 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
314 self.count_insn("fmul");
316 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
320 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
321 self.count_insn("fmul");
323 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
324 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
330 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
331 self.count_insn("udiv");
333 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
// "exact" division asserts to LLVM that the division has no remainder.
337 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
338 self.count_insn("exactudiv");
340 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
344 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
345 self.count_insn("sdiv");
347 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
351 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
352 self.count_insn("exactsdiv");
354 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
358 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
359 self.count_insn("fdiv");
361 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
365 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
366 self.count_insn("fdiv");
368 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
369 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
374 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
375 self.count_insn("urem");
377 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
381 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
382 self.count_insn("srem");
384 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
388 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
389 self.count_insn("frem");
391 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
395 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
396 self.count_insn("frem");
398 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
399 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
404 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
405 self.count_insn("shl");
407 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
411 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
412 self.count_insn("lshr");
414 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
418 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
419 self.count_insn("ashr");
421 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
425 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
426 self.count_insn("and");
428 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
432 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
433 self.count_insn("or");
435 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
439 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
440 self.count_insn("xor");
442 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
446 fn neg(&self, v: &'ll Value) -> &'ll Value {
447 self.count_insn("neg");
449 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
453 fn fneg(&self, v: &'ll Value) -> &'ll Value {
454 self.count_insn("fneg");
456 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
460 fn not(&self, v: &'ll Value) -> &'ll Value {
461 self.count_insn("not");
463 llvm::LLVMBuildNot(self.llbuilder, v, noname())
// Stack allocation. `alloca` always inserts at the start of the function's
// entry block (via a temporary builder) so LLVM treats it as a static
// alloca; `dynamic_alloca` emits at the current insertion point.
467 fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
468 let bx = Builder::with_cx(self.cx);
469 bx.position_at_start(unsafe {
470 llvm::LLVMGetFirstBasicBlock(self.llfn())
472 bx.dynamic_alloca(ty, name, align)
475 fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
476 self.count_insn("alloca");
// Empty names go through the static noname() NUL instead of allocating.
478 let alloca = if name.is_empty() {
479 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
481 let name = SmallCStr::new(name);
482 llvm::LLVMBuildAlloca(self.llbuilder, ty,
485 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Like dynamic_alloca but allocates `len` elements of `ty`.
490 fn array_alloca(&self,
494 align: Align) -> &'ll Value {
495 self.count_insn("alloca");
497 let alloca = if name.is_empty() {
498 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
500 let name = SmallCStr::new(name);
501 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
504 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Loads: plain aligned load, volatile load, atomic load, and the high-level
// load_operand which turns a PlaceRef into an OperandRef with the right
// value-range / nonnull metadata attached.
509 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
510 self.count_insn("load");
512 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
513 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
518 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
519 self.count_insn("load.volatile");
521 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
522 llvm::LLVMSetVolatile(insn, llvm::True);
// Atomic load; the generic ordering is translated to the LLVM enum.
530 order: rustc_codegen_ssa::common::AtomicOrdering,
533 self.count_insn("load.atomic");
535 let load = llvm::LLVMRustBuildAtomicLoad(
539 AtomicOrdering::from_generic(order),
541 // LLVM requires the alignment of atomic loads to be at least the size of the type.
542 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
// Load a place into an operand, specializing on the layout: ZST, immediate
// scalar (possibly folded from a constant global), scalar pair, or by-ref.
549 place: PlaceRef<'tcx, &'ll Value>
550 ) -> OperandRef<'tcx, &'ll Value> {
551 debug!("PlaceRef::load: {:?}", place);
553 assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
555 if place.layout.is_zst() {
556 return OperandRef::new_zst(self.cx(), place.layout);
// Attach !range metadata (or !nonnull for pointers) describing the
// scalar's valid range, so LLVM can exploit the invariant.
559 let scalar_load_metadata = |load, scalar: &layout::Scalar| {
560 let vr = scalar.valid_range.clone();
563 let range = scalar.valid_range_exclusive(self.cx());
564 if range.start != range.end {
565 self.range_metadata(load, range);
568 layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
569 self.nonnull_metadata(load);
575 let val = if let Some(llextra) = place.llextra {
576 OperandValue::Ref(place.llval, Some(llextra), place.align)
577 } else if place.layout.is_llvm_immediate() {
578 let mut const_llval = None;
// Constant globals can be read at compile time instead of loaded.
580 if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
581 if llvm::LLVMIsGlobalConstant(global) == llvm::True {
582 const_llval = llvm::LLVMGetInitializer(global);
586 let llval = const_llval.unwrap_or_else(|| {
587 let load = self.load(place.llval, place.align);
588 if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
589 scalar_load_metadata(load, scalar);
593 OperandValue::Immediate(base::to_immediate(self, llval, place.layout))
594 } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
595 let load = |i, scalar: &layout::Scalar| {
596 let llptr = self.struct_gep(place.llval, i as u64);
597 let load = self.load(llptr, place.align);
598 scalar_load_metadata(load, scalar);
// Bools are stored as i8 but used as i1 immediates.
599 if scalar.is_bool() {
600 self.trunc(load, self.cx().type_i1())
605 OperandValue::Pair(load(0, a), load(1, b))
607 OperandValue::Ref(place.llval, None, place.align)
610 OperandRef { val, layout: place.layout }
// Attach !range metadata [start, end) to a load. Skipped entirely on
// amdgpu, where the metadata trips an LLVM assertion (see comment below).
615 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
616 if self.cx().sess().target.target.arch == "amdgpu" {
617 // amdgpu/LLVM does something weird and thinks a i64 value is
618 // split into a v2i32, halving the bitwidth LLVM expects,
619 // tripping an assertion. So, for now, just disable this
625 let llty = self.cx.val_ty(load);
627 self.cx.const_uint_big(llty, range.start),
628 self.cx.const_uint_big(llty, range.end)
631 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
632 llvm::LLVMMDNodeInContext(self.cx.llcx,
// Attach !nonnull metadata (an empty MD node) to a pointer load.
638 fn nonnull_metadata(&self, load: &'ll Value) {
640 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
641 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// Stores: the plain `store` delegates to `store_with_flags` with no flags;
// flags select unaligned (align 1), volatile and nontemporal emission.
645 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
646 self.store_with_flags(val, ptr, align, MemFlags::empty())
656 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
657 self.count_insn("store");
// check_store bitcasts the destination pointer if its pointee type
// does not match the stored value's type.
658 let ptr = self.check_store(val, ptr);
660 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
661 let align = if flags.contains(MemFlags::UNALIGNED) {
664 align.abi() as c_uint
666 llvm::LLVMSetAlignment(store, align);
667 if flags.contains(MemFlags::VOLATILE) {
668 llvm::LLVMSetVolatile(store, llvm::True);
670 if flags.contains(MemFlags::NONTEMPORAL) {
671 // According to LLVM [1] building a nontemporal store must
672 // *always* point to a metadata value of the integer 1.
674 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
675 let one = self.cx.const_i32(1);
676 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
677 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
// Atomic store counterpart of atomic_load above.
683 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
684 order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
685 debug!("Store {:?} -> {:?}", val, ptr);
686 self.count_insn("store.atomic");
687 let ptr = self.check_store(val, ptr);
689 let store = llvm::LLVMRustBuildAtomicStore(
693 AtomicOrdering::from_generic(order),
695 // LLVM requires the alignment of atomic stores to be at least the size of the type.
696 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
// GEP (pointer arithmetic) and the full family of cast instructions.
// All direct LLVMBuild* shims; intcast picks sext vs zext from is_signed.
700 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
701 self.count_insn("gep");
703 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
704 indices.len() as c_uint, noname())
708 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
709 self.count_insn("inboundsgep");
711 llvm::LLVMBuildInBoundsGEP(
712 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
717 fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
718 self.count_insn("trunc");
720 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
724 fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
725 self.count_insn("sext");
727 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
731 fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
732 self.count_insn("fptoui");
734 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
738 fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
739 self.count_insn("fptosi");
741 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
745 fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
746 self.count_insn("uitofp");
748 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
752 fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
753 self.count_insn("sitofp");
755 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
759 fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
760 self.count_insn("fptrunc");
762 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
766 fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
767 self.count_insn("fpext");
769 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
773 fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
774 self.count_insn("ptrtoint");
776 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
780 fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
781 self.count_insn("inttoptr");
783 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
787 fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
788 self.count_insn("bitcast");
790 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
795 fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
796 self.count_insn("intcast");
798 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
802 fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
803 self.count_insn("pointercast");
805 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
// Integer / floating-point comparisons; the backend-agnostic predicate is
// translated to the LLVM enum (icmp explicitly, fcmp by numeric cast).
810 fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
811 self.count_insn("icmp");
812 let op = llvm::IntPredicate::from_generic(op);
814 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
818 fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
819 self.count_insn("fcmp");
821 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
825 /* Miscellaneous instructions */
// Create a phi with no incoming edges; edges are added later.
826 fn empty_phi(&self, ty: &'ll Type) -> &'ll Value {
827 self.count_insn("emptyphi");
829 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
// Create a phi and register all (value, predecessor-block) pairs at once.
833 fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
834 assert_eq!(vals.len(), bbs.len());
835 let phi = self.empty_phi(ty);
836 self.count_insn("addincoming");
838 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
840 vals.len() as c_uint);
// Build an inline-asm call. The constraint string is validated by LLVM
// first; on success the asm value is called like a function. Returns None
// when constraint verification fails (see the bail-out comment below).
845 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
846 inputs: &[&'ll Value], output: &'ll Type,
847 volatile: bool, alignstack: bool,
848 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
849 self.count_insn("inlineasm");
851 let volatile = if volatile { llvm::True }
852 else { llvm::False };
853 let alignstack = if alignstack { llvm::True }
854 else { llvm::False };
// The asm "function" type is derived from the input values' types.
856 let argtys = inputs.iter().map(|v| {
857 debug!("Asm Input Type: {:?}", *v);
859 }).collect::<Vec<_>>();
861 debug!("Asm Output Type: {:?}", output);
862 let fty = self.cx().type_func(&argtys[..], output);
864 // Ask LLVM to verify that the constraints are well-formed.
865 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
866 debug!("Constraint verification result: {:?}", constraints_ok);
868 let v = llvm::LLVMRustInlineAsm(
869 fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
870 Some(self.call(v, inputs, None))
872 // LLVM has detected an issue with our constraints, bail out
// Bulk memory operations. NONTEMPORAL is emulated with a load + flagged
// store because there is no nontemporal memcpy/memmove intrinsic.
878 fn memcpy(&self, dst: &'ll Value, dst_align: Align,
879 src: &'ll Value, src_align: Align,
880 size: &'ll Value, flags: MemFlags) {
881 if flags.contains(MemFlags::NONTEMPORAL) {
882 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
883 let val = self.load(src, src_align);
884 let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
885 self.store_with_flags(val, ptr, dst_align, flags);
// Normalize the size to isize and both pointers to i8* before calling
// the LLVM memcpy builder.
888 let size = self.intcast(size, self.cx().type_isize(), false);
889 let is_volatile = flags.contains(MemFlags::VOLATILE);
890 let dst = self.pointercast(dst, self.cx().type_i8p());
891 let src = self.pointercast(src, self.cx().type_i8p());
893 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
894 src, src_align.abi() as c_uint, size, is_volatile);
// Same shape as memcpy, but allows overlapping ranges (memmove).
898 fn memmove(&self, dst: &'ll Value, dst_align: Align,
899 src: &'ll Value, src_align: Align,
900 size: &'ll Value, flags: MemFlags) {
901 if flags.contains(MemFlags::NONTEMPORAL) {
902 // HACK(nox): This is inefficient but there is no nontemporal memmove.
903 let val = self.load(src, src_align);
904 let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
905 self.store_with_flags(val, ptr, dst_align, flags);
908 let size = self.intcast(size, self.cx().type_isize(), false);
909 let is_volatile = flags.contains(MemFlags::VOLATILE);
910 let dst = self.pointercast(dst, self.cx().type_i8p());
911 let src = self.pointercast(src, self.cx().type_i8p());
913 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
914 src, src_align.abi() as c_uint, size, is_volatile);
// memset path: calls the pointer-width-specific llvm.memset intrinsic
// looked up from the context's intrinsic table.
921 fill_byte: &'ll Value,
926 let ptr_width = &self.cx().sess().target.target.target_pointer_width;
927 let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
928 let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
929 let ptr = self.pointercast(ptr, self.cx().type_i8p());
930 let align = self.cx().const_u32(align.abi() as u32);
931 let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
932 self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
// min/max, select, va_arg and vector element manipulation.
935 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
936 self.count_insn("minnum");
// The Rust-side shim returns Option because the underlying LLVM
// intrinsic only exists from LLVM 6.0 onwards.
938 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
939 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
942 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
943 self.count_insn("maxnum");
945 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
946 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
951 &self, cond: &'ll Value,
952 then_val: &'ll Value,
953 else_val: &'ll Value,
955 self.count_insn("select");
957 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
962 fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
963 self.count_insn("vaarg");
965 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
969 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
970 self.count_insn("extractelement");
972 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
977 &self, vec: &'ll Value,
981 self.count_insn("insertelement");
983 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
987 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
988 self.count_insn("shufflevector");
990 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
// Broadcast a scalar to all lanes: insert into lane 0 of an undef vector,
// then shuffle with an all-zeros mask.
994 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
996 let elt_ty = self.cx.val_ty(elt);
997 let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
998 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
999 let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
1000 self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
// Horizontal vector reductions. The *_fast float variants set fast-math
// flags (and pass NoNaNs for fmin/fmax); the integer ones are plain shims.
1004 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
1005 self.count_insn("vector.reduce.fadd_fast");
1007 // FIXME: add a non-fast math version once
1008 // https://bugs.llvm.org/show_bug.cgi?id=36732
1010 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
1011 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1015 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
1016 self.count_insn("vector.reduce.fmul_fast");
1018 // FIXME: add a non-fast math version once
1019 // https://bugs.llvm.org/show_bug.cgi?id=36732
1021 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
1022 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1026 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
1027 self.count_insn("vector.reduce.add");
1028 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
1030 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
1031 self.count_insn("vector.reduce.mul");
1032 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
1034 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
1035 self.count_insn("vector.reduce.and");
1036 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
1038 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
1039 self.count_insn("vector.reduce.or");
1040 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
1042 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
1043 self.count_insn("vector.reduce.xor");
1044 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
1046 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
1047 self.count_insn("vector.reduce.fmin");
1048 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
1050 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
1051 self.count_insn("vector.reduce.fmax");
1052 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
1054 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
1055 self.count_insn("vector.reduce.fmin_fast");
1057 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
1058 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1062 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
1063 self.count_insn("vector.reduce.fmax_fast");
1065 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
1066 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
1070 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
1071 self.count_insn("vector.reduce.min");
1072 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
1074 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
1075 self.count_insn("vector.reduce.max");
1076 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
// Aggregate (struct/array) value access and classic landing-pad EH.
// The index asserts guard against silent truncation in the u64 -> c_uint
// narrowing required by the C API.
1079 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
1080 self.count_insn("extractvalue");
1081 assert_eq!(idx as c_uint as u64, idx);
1083 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
1087 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
1088 idx: u64) -> &'ll Value {
1089 self.count_insn("insertvalue");
1090 assert_eq!(idx as c_uint as u64, idx);
1092 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
1097 fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value,
1098 num_clauses: usize) -> &'ll Value {
1099 self.count_insn("landingpad");
1101 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
1102 num_clauses as c_uint, noname())
1106 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
1108 llvm::LLVMAddClause(landing_pad, clause);
1112 fn set_cleanup(&self, landing_pad: &'ll Value) {
1113 self.count_insn("setcleanup");
1115 llvm::LLVMSetCleanup(landing_pad, llvm::True);
1119 fn resume(&self, exn: &'ll Value) -> &'ll Value {
1120 self.count_insn("resume");
1122 llvm::LLVMBuildResume(self.llbuilder, exn)
// Funclet-based EH (MSVC-style). The LLVMRust* shims return Option and are
// expect()-ed, since older LLVM versions lacked funclet support.
1126 fn cleanup_pad(&self,
1127 parent: Option<&'ll Value>,
1128 args: &[&'ll Value]) -> Funclet<'ll> {
1129 self.count_insn("cleanuppad");
1130 let name = const_cstr!("cleanuppad");
1132 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
1134 args.len() as c_uint,
1138 Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
1142 &self, funclet: &Funclet<'ll>,
1143 unwind: Option<&'ll BasicBlock>,
1145 self.count_insn("cleanupret");
1147 llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
1149 ret.expect("LLVM does not have support for cleanupret")
1154 args: &[&'ll Value]) -> Funclet<'ll> {
1155 self.count_insn("catchpad");
1156 let name = const_cstr!("catchpad");
1158 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1159 args.len() as c_uint, args.as_ptr(),
1162 Funclet::new(ret.expect("LLVM does not have support for catchpad"))
1165 fn catch_ret(&self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
1166 self.count_insn("catchret");
1168 llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
1170 ret.expect("LLVM does not have support for catchret")
1175 parent: Option<&'ll Value>,
1176 unwind: Option<&'ll BasicBlock>,
1177 num_handlers: usize,
1179 self.count_insn("catchswitch");
1180 let name = const_cstr!("catchswitch");
1182 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1183 num_handlers as c_uint,
1186 ret.expect("LLVM does not have support for catchswitch")
1189 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1191 llvm::LLVMRustAddHandler(catch_switch, handler);
1195 fn set_personality_fn(&self, personality: &'ll Value) {
1197 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1201 // Atomic Operations
// cmpxchg / atomicrmw / fence: all backend-generic enums are translated to
// the LLVM ones via from_generic. NOTE(review): the surrounding signatures
// are elided in this view.
1207 order: rustc_codegen_ssa::common::AtomicOrdering,
1208 failure_order: rustc_codegen_ssa::common::AtomicOrdering,
1211 let weak = if weak { llvm::True } else { llvm::False };
1213 llvm::LLVMRustBuildAtomicCmpXchg(
1218 AtomicOrdering::from_generic(order),
1219 AtomicOrdering::from_generic(failure_order),
1226 op: rustc_codegen_ssa::common::AtomicRmwBinOp,
1229 order: rustc_codegen_ssa::common::AtomicOrdering,
1232 llvm::LLVMBuildAtomicRMW(
1234 AtomicRmwBinOp::from_generic(op),
1237 AtomicOrdering::from_generic(order),
1244 order: rustc_codegen_ssa::common::AtomicOrdering,
1245 scope: rustc_codegen_ssa::common::SynchronizationScope
1248 llvm::LLVMRustBuildAtomicFence(
1250 AtomicOrdering::from_generic(order),
1251 SynchronizationScope::from_generic(scope)
// Append a case to a previously-built switch instruction.
1256 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1258 llvm::LLVMAddCase(s, on_val, dest)
// Append a single incoming edge to an existing phi.
1262 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1263 self.count_insn("addincoming");
1265 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
// Mark a load as !invariant.load (the loaded memory never changes).
1269 fn set_invariant_load(&self, load: &'ll Value) {
1271 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1272 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// check_store: if the destination pointer's pointee type doesn't match the
// stored value's type, bitcast the pointer so the store type-checks.
1276 fn check_store<'b>(&self,
1278 ptr: &'ll Value) -> &'ll Value {
1279 let dest_ptr_ty = self.cx.val_ty(ptr);
1280 let stored_ty = self.cx.val_ty(val);
1281 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
1283 assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
1285 if dest_ptr_ty == stored_ptr_ty {
1288 debug!("Type mismatch in store. \
1289 Expected {:?}, got {:?}; inserting bitcast",
1290 dest_ptr_ty, stored_ptr_ty);
1291 self.bitcast(ptr, stored_ptr_ty)
// check_call: compare each argument's type against the callee's declared
// parameter types, inserting bitcasts where they differ. Returns the
// original slice (borrowed) when everything already matches.
1295 fn check_call<'b>(&self,
1298 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1299 let mut fn_ty = self.cx.val_ty(llfn);
1300 // Strip off pointers
1301 while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
1302 fn_ty = self.cx.element_type(fn_ty);
1305 assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
1306 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1308 let param_tys = self.cx.func_params_types(fn_ty);
1310 let all_args_match = param_tys.iter()
1311 .zip(args.iter().map(|&v| self.cx().val_ty(v)))
1312 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
1315 return Cow::Borrowed(args);
1318 let casted_args: Vec<_> = param_tys.into_iter()
1321 .map(|(i, (expected_ty, &actual_val))| {
1322 let actual_ty = self.cx().val_ty(actual_val);
1323 if expected_ty != actual_ty {
1324 debug!("Type mismatch in function call of {:?}. \
1325 Expected {:?} for param {}, got {:?}; injecting bitcast",
1326 llfn, expected_ty, i, actual_ty);
1327 self.bitcast(actual_val, expected_ty)
1334 Cow::Owned(casted_args)
// Emit llvm.lifetime.start / llvm.lifetime.end markers for a stack slot.
// Skipped entirely at -O0 (no optimizer to benefit from them).
1337 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1338 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1341 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1342 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1345 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
1346 if self.cx.sess().opts.optimize == config::OptLevel::No {
1350 let size = size.bytes();
1355 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
// The intrinsic takes (i64 size, i8* ptr).
1357 let ptr = self.pointercast(ptr, self.cx.type_i8p());
1358 self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
// call: like invoke above, but without an unwind edge — arguments are
// type-repaired via check_call and the funclet bundle attached if present.
// NOTE(review): the signature's leading lines are elided in this view.
1364 args: &[&'ll Value],
1365 funclet: Option<&Funclet<'ll>>,
1367 self.count_insn("call");
1369 debug!("Call {:?} with args ({:?})",
1373 let args = self.check_call("call", llfn, args);
1374 let bundle = funclet.map(|funclet| funclet.bundle());
1375 let bundle = bundle.as_ref().map(|b| &*b.raw);
1378 llvm::LLVMRustBuildCall(
1381 args.as_ptr() as *const &llvm::Value,
1382 args.len() as c_uint,
1388 fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
1389 self.count_insn("zext");
1391 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
// Field access on a struct pointer; assert guards c_uint narrowing.
1395 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1396 self.count_insn("structgep");
1397 assert_eq!(idx as c_uint as u64, idx);
1399 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1403 fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
1407 fn delete_basic_block(&self, bb: &'ll BasicBlock) {
1409 llvm::LLVMDeleteBasicBlock(bb);
// Mark a call-site's return as noinline to prevent inlining of the callee.
1413 fn do_not_inline(&self, llret: &'ll Value) {
1414 llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);