1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{self, False, OperandBundleDef, BasicBlock};
13 use common::{self, *};
14 use context::CodegenCx;
17 use libc::{c_uint, c_char};
18 use rustc::ty::TyCtxt;
19 use rustc::ty::layout::{Align, Size};
20 use rustc::session::{config, Session};
21 use rustc_data_structures::small_c_str::SmallCStr;
29 // All Builders must have an llfn associated with them
31 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
32 pub llbuilder: &'ll mut llvm::Builder<'ll>,
33 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
36 impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
39 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
44 // This is a really awful way to get a zero-length c-string, but better (and a
45 // lot more efficient) than doing str::as_c_str("", ...) every time.
46 fn noname() -> *const c_char {
47 static CNULL: c_char = 0;
52 pub struct MemFlags: u8 {
53 const VOLATILE = 1 << 0;
54 const NONTEMPORAL = 1 << 1;
55 const UNALIGNED = 1 << 2;
59 impl HasCodegen for Builder<'a, 'll, 'tcx> {
60 type CodegenCx = CodegenCx<'ll, 'tcx>;
// Every method in this impl lowers one generic codegen operation to the
// corresponding LLVM-C / LLVMRust FFI call on `self.llbuilder`.
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
65 cx: &'a CodegenCx<'ll, 'tcx>,
69 let bx = Builder::with_cx(cx);
71 let name = SmallCStr::new(name);
72 llvm::LLVMAppendBasicBlockInContext(
78 bx.position_at_end(llbb);
82 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
83 // Create a fresh builder from the crate context.
84 let llbuilder = unsafe {
85 llvm::LLVMCreateBuilderInContext(cx.llcx)
93 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
94 Builder::new_block(self.cx, self.llfn(), name)
97 fn sess(&self) -> &Session {
101 fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
105 fn llfn(&self) -> &'ll Value {
107 llvm::LLVMGetBasicBlockParent(self.llbb())
111 fn llbb(&self) -> &'ll BasicBlock {
113 llvm::LLVMGetInsertBlock(self.llbuilder)
117 fn count_insn(&self, category: &str) {
118 if self.cx().sess().codegen_stats() {
119 self.cx().stats.borrow_mut().n_llvm_insns += 1;
121 if self.cx().sess().count_llvm_insns() {
125 .entry(category.to_string())
130 fn set_value_name(&self, value: &'ll Value, name: &str) {
131 let cname = SmallCStr::new(name);
133 llvm::LLVMSetValueName(value, cname.as_ptr());
137 fn position_at_end(&self, llbb: &'ll BasicBlock) {
139 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
143 fn position_at_start(&self, llbb: &'ll BasicBlock) {
145 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
150 self.count_insn("retvoid");
152 llvm::LLVMBuildRetVoid(self.llbuilder);
156 fn ret(&self, v: &'ll Value) {
157 self.count_insn("ret");
159 llvm::LLVMBuildRet(self.llbuilder, v);
163 fn br(&self, dest: &'ll BasicBlock) {
164 self.count_insn("br");
166 llvm::LLVMBuildBr(self.llbuilder, dest);
173 then_llbb: &'ll BasicBlock,
174 else_llbb: &'ll BasicBlock,
176 self.count_insn("condbr");
178 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
185 else_llbb: &'ll BasicBlock,
189 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
196 then: &'ll BasicBlock,
197 catch: &'ll BasicBlock,
198 bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
199 self.count_insn("invoke");
201 debug!("Invoke {:?} with args ({:?})",
205 let args = self.check_call("invoke", llfn, args);
206 let bundle = bundle.map(OperandBundleDef::from_generic);
207 let bundle = bundle.as_ref().map(|b| &*b.raw);
210 llvm::LLVMRustBuildInvoke(self.llbuilder,
213 args.len() as c_uint,
221 fn unreachable(&self) {
222 self.count_insn("unreachable");
224 llvm::LLVMBuildUnreachable(self.llbuilder);
229 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
230 self.count_insn("add");
232 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
236 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
237 self.count_insn("fadd");
239 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
243 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
244 self.count_insn("fadd");
246 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
247 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
252 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
253 self.count_insn("sub");
255 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
259 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
260 self.count_insn("fsub");
262 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
266 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
267 self.count_insn("fsub");
269 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
270 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
275 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
276 self.count_insn("mul");
278 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
282 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
283 self.count_insn("fmul");
285 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
289 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
290 self.count_insn("fmul");
292 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
293 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
299 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
300 self.count_insn("udiv");
302 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
306 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
307 self.count_insn("exactudiv");
309 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
313 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
314 self.count_insn("sdiv");
316 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
320 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
321 self.count_insn("exactsdiv");
323 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
327 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
328 self.count_insn("fdiv");
330 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
334 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
335 self.count_insn("fdiv");
337 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
338 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
343 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
344 self.count_insn("urem");
346 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
350 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
351 self.count_insn("srem");
353 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
357 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
358 self.count_insn("frem");
360 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
364 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
365 self.count_insn("frem");
367 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
368 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
373 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
374 self.count_insn("shl");
376 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
380 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
381 self.count_insn("lshr");
383 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
387 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
388 self.count_insn("ashr");
390 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
394 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
395 self.count_insn("and");
397 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
401 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
402 self.count_insn("or");
404 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
408 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
409 self.count_insn("xor");
411 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
415 fn neg(&self, v: &'ll Value) -> &'ll Value {
416 self.count_insn("neg");
418 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
422 fn fneg(&self, v: &'ll Value) -> &'ll Value {
423 self.count_insn("fneg");
425 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
429 fn not(&self, v: &'ll Value) -> &'ll Value {
430 self.count_insn("not");
432 llvm::LLVMBuildNot(self.llbuilder, v, noname())
436 fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
437 let bx = Builder::with_cx(self.cx);
438 bx.position_at_start(unsafe {
439 llvm::LLVMGetFirstBasicBlock(self.llfn())
441 bx.dynamic_alloca(ty, name, align)
444 fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
445 self.count_insn("alloca");
447 let alloca = if name.is_empty() {
448 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
450 let name = SmallCStr::new(name);
451 llvm::LLVMBuildAlloca(self.llbuilder, ty,
454 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
459 fn array_alloca(&self,
463 align: Align) -> &'ll Value {
464 self.count_insn("alloca");
466 let alloca = if name.is_empty() {
467 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
469 let name = SmallCStr::new(name);
470 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
473 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
478 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
479 self.count_insn("load");
481 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
482 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
487 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
488 self.count_insn("load.volatile");
490 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
491 llvm::LLVMSetVolatile(insn, llvm::True);
499 order: common::AtomicOrdering,
502 self.count_insn("load.atomic");
504 let load = llvm::LLVMRustBuildAtomicLoad(
508 AtomicOrdering::from_generic(order),
510 // LLVM requires the alignment of atomic loads to be at least the size of the type.
511 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
517 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
518 if self.sess().target.target.arch == "amdgpu" {
519 // amdgpu/LLVM does something weird and thinks a i64 value is
520 // split into a v2i32, halving the bitwidth LLVM expects,
521 // tripping an assertion. So, for now, just disable this
527 let llty = self.cx.val_ty(load);
529 self.cx.const_uint_big(llty, range.start),
530 self.cx.const_uint_big(llty, range.end)
533 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
534 llvm::LLVMMDNodeInContext(self.cx.llcx,
540 fn nonnull_metadata(&self, load: &'ll Value) {
542 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
543 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
547 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
548 self.store_with_flags(val, ptr, align, MemFlags::empty())
558 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
559 self.count_insn("store");
560 let ptr = self.check_store(val, ptr);
562 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
563 let align = if flags.contains(MemFlags::UNALIGNED) {
566 align.abi() as c_uint
568 llvm::LLVMSetAlignment(store, align);
569 if flags.contains(MemFlags::VOLATILE) {
570 llvm::LLVMSetVolatile(store, llvm::True);
572 if flags.contains(MemFlags::NONTEMPORAL) {
573 // According to LLVM [1] building a nontemporal store must
574 // *always* point to a metadata value of the integer 1.
576 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
577 let one = self.cx.const_i32(1);
578 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
579 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
585 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
586 order: common::AtomicOrdering, size: Size) {
587 debug!("Store {:?} -> {:?}", val, ptr);
588 self.count_insn("store.atomic");
589 let ptr = self.check_store(val, ptr);
591 let store = llvm::LLVMRustBuildAtomicStore(
595 AtomicOrdering::from_generic(order),
597 // LLVM requires the alignment of atomic stores to be at least the size of the type.
598 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
602 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
603 self.count_insn("gep");
605 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
606 indices.len() as c_uint, noname())
610 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
611 self.count_insn("inboundsgep");
613 llvm::LLVMBuildInBoundsGEP(
614 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
619 fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
620 self.count_insn("trunc");
622 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
626 fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
627 self.count_insn("sext");
629 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
633 fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
634 self.count_insn("fptoui");
636 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
640 fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
641 self.count_insn("fptosi");
643 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
647 fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
648 self.count_insn("uitofp");
650 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
654 fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
655 self.count_insn("sitofp");
657 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
661 fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
662 self.count_insn("fptrunc");
664 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
668 fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
669 self.count_insn("fpext");
671 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
675 fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
676 self.count_insn("ptrtoint");
678 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
682 fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
683 self.count_insn("inttoptr");
685 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
689 fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
690 self.count_insn("bitcast");
692 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
697 fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
698 self.count_insn("intcast");
700 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
704 fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
705 self.count_insn("pointercast");
707 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
712 fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
713 self.count_insn("icmp");
714 let op = llvm::IntPredicate::from_generic(op);
716 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
720 fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
721 self.count_insn("fcmp");
723 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
727 /* Miscellaneous instructions */
728 fn empty_phi(&self, ty: &'ll Type) -> &'ll Value {
729 self.count_insn("emptyphi");
731 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
735 fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
736 assert_eq!(vals.len(), bbs.len());
737 let phi = self.empty_phi(ty);
738 self.count_insn("addincoming");
740 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
742 vals.len() as c_uint);
747 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
748 inputs: &[&'ll Value], output: &'ll Type,
749 volatile: bool, alignstack: bool,
750 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
751 self.count_insn("inlineasm");
753 let volatile = if volatile { llvm::True }
754 else { llvm::False };
755 let alignstack = if alignstack { llvm::True }
756 else { llvm::False };
758 let argtys = inputs.iter().map(|v| {
759 debug!("Asm Input Type: {:?}", *v);
761 }).collect::<Vec<_>>();
763 debug!("Asm Output Type: {:?}", output);
764 let fty = self.cx().type_func(&argtys[..], output);
766 // Ask LLVM to verify that the constraints are well-formed.
767 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
768 debug!("Constraint verification result: {:?}", constraints_ok);
770 let v = llvm::LLVMRustInlineAsm(
771 fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
772 Some(self.call(v, inputs, None))
774 // LLVM has detected an issue with our constraints, bail out
780 fn memcpy(&self, dst: &'ll Value, dst_align: Align,
781 src: &'ll Value, src_align: Align,
782 size: &'ll Value, flags: MemFlags) {
783 if flags.contains(MemFlags::NONTEMPORAL) {
784 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
785 let val = self.load(src, src_align);
786 let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
787 self.store_with_flags(val, ptr, dst_align, flags);
790 let size = self.intcast(size, self.cx().type_isize(), false);
791 let is_volatile = flags.contains(MemFlags::VOLATILE);
792 let dst = self.pointercast(dst, self.cx().type_i8p());
793 let src = self.pointercast(src, self.cx().type_i8p());
795 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
796 src, src_align.abi() as c_uint, size, is_volatile);
800 fn memmove(&self, dst: &'ll Value, dst_align: Align,
801 src: &'ll Value, src_align: Align,
802 size: &'ll Value, flags: MemFlags) {
803 if flags.contains(MemFlags::NONTEMPORAL) {
804 // HACK(nox): This is inefficient but there is no nontemporal memmove.
805 let val = self.load(src, src_align);
806 let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
807 self.store_with_flags(val, ptr, dst_align, flags);
810 let size = self.intcast(size, self.cx().type_isize(), false);
811 let is_volatile = flags.contains(MemFlags::VOLATILE);
812 let dst = self.pointercast(dst, self.cx().type_i8p());
813 let src = self.pointercast(src, self.cx().type_i8p());
815 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
816 src, src_align.abi() as c_uint, size, is_volatile);
823 fill_byte: &'ll Value,
828 let ptr_width = &self.sess().target.target.target_pointer_width;
829 let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
830 let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
831 let ptr = self.pointercast(ptr, self.cx().type_i8p());
832 let align = self.cx().const_u32(align.abi() as u32);
833 let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
834 self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
837 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
838 self.count_insn("minnum");
840 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
841 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
844 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
845 self.count_insn("maxnum");
847 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
848 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
853 &self, cond: &'ll Value,
854 then_val: &'ll Value,
855 else_val: &'ll Value,
857 self.count_insn("select");
859 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
864 fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
865 self.count_insn("vaarg");
867 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
871 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
872 self.count_insn("extractelement");
874 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
879 &self, vec: &'ll Value,
883 self.count_insn("insertelement");
885 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
889 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
890 self.count_insn("shufflevector");
892 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
896 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
898 let elt_ty = self.cx.val_ty(elt);
899 let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
900 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
901 let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
902 self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
906 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
907 self.count_insn("vector.reduce.fadd_fast");
909 // FIXME: add a non-fast math version once
910 // https://bugs.llvm.org/show_bug.cgi?id=36732
912 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
913 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
917 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
918 self.count_insn("vector.reduce.fmul_fast");
920 // FIXME: add a non-fast math version once
921 // https://bugs.llvm.org/show_bug.cgi?id=36732
923 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
924 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
928 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
929 self.count_insn("vector.reduce.add");
930 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
932 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
933 self.count_insn("vector.reduce.mul");
934 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
936 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
937 self.count_insn("vector.reduce.and");
938 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
940 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
941 self.count_insn("vector.reduce.or");
942 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
944 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
945 self.count_insn("vector.reduce.xor");
946 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
948 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
949 self.count_insn("vector.reduce.fmin");
950 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
952 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
953 self.count_insn("vector.reduce.fmax");
954 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
956 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
957 self.count_insn("vector.reduce.fmin_fast");
959 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
960 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
964 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
965 self.count_insn("vector.reduce.fmax_fast");
967 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
968 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
972 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
973 self.count_insn("vector.reduce.min");
974 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
976 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
977 self.count_insn("vector.reduce.max");
978 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
981 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
982 self.count_insn("extractvalue");
983 assert_eq!(idx as c_uint as u64, idx);
985 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
989 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
990 idx: u64) -> &'ll Value {
991 self.count_insn("insertvalue");
992 assert_eq!(idx as c_uint as u64, idx);
994 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
999 fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value,
1000 num_clauses: usize) -> &'ll Value {
1001 self.count_insn("landingpad");
1003 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
1004 num_clauses as c_uint, noname())
1008 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
1010 llvm::LLVMAddClause(landing_pad, clause);
1014 fn set_cleanup(&self, landing_pad: &'ll Value) {
1015 self.count_insn("setcleanup");
1017 llvm::LLVMSetCleanup(landing_pad, llvm::True);
1021 fn resume(&self, exn: &'ll Value) -> &'ll Value {
1022 self.count_insn("resume");
1024 llvm::LLVMBuildResume(self.llbuilder, exn)
1028 fn cleanup_pad(&self,
1029 parent: Option<&'ll Value>,
1030 args: &[&'ll Value]) -> &'ll Value {
1031 self.count_insn("cleanuppad");
1032 let name = const_cstr!("cleanuppad");
1034 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
1036 args.len() as c_uint,
1040 ret.expect("LLVM does not have support for cleanuppad")
1044 &self, cleanup: &'ll Value,
1045 unwind: Option<&'ll BasicBlock>,
1047 self.count_insn("cleanupret");
1049 llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
1051 ret.expect("LLVM does not have support for cleanupret")
1056 args: &[&'ll Value]) -> &'ll Value {
1057 self.count_insn("catchpad");
1058 let name = const_cstr!("catchpad");
1060 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1061 args.len() as c_uint, args.as_ptr(),
1064 ret.expect("LLVM does not have support for catchpad")
1067 fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
1068 self.count_insn("catchret");
1070 llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
1072 ret.expect("LLVM does not have support for catchret")
1077 parent: Option<&'ll Value>,
1078 unwind: Option<&'ll BasicBlock>,
1079 num_handlers: usize,
1081 self.count_insn("catchswitch");
1082 let name = const_cstr!("catchswitch");
1084 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1085 num_handlers as c_uint,
1088 ret.expect("LLVM does not have support for catchswitch")
1091 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1093 llvm::LLVMRustAddHandler(catch_switch, handler);
1097 fn set_personality_fn(&self, personality: &'ll Value) {
1099 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1103 // Atomic Operations
1109 order: common::AtomicOrdering,
1110 failure_order: common::AtomicOrdering,
1113 let weak = if weak { llvm::True } else { llvm::False };
1115 llvm::LLVMRustBuildAtomicCmpXchg(
1120 AtomicOrdering::from_generic(order),
1121 AtomicOrdering::from_generic(failure_order),
1128 op: common::AtomicRmwBinOp,
1131 order: common::AtomicOrdering,
1134 llvm::LLVMBuildAtomicRMW(
1136 AtomicRmwBinOp::from_generic(op),
1139 AtomicOrdering::from_generic(order),
1144 fn atomic_fence(&self, order: common::AtomicOrdering, scope: common::SynchronizationScope) {
1146 llvm::LLVMRustBuildAtomicFence(
1148 AtomicOrdering::from_generic(order),
1149 SynchronizationScope::from_generic(scope)
1154 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1156 llvm::LLVMAddCase(s, on_val, dest)
1160 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1161 self.count_insn("addincoming");
1163 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
1167 fn set_invariant_load(&self, load: &'ll Value) {
1169 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1170 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
1174 /// Returns the ptr value that should be used for storing `val`.
1175 fn check_store<'b>(&self,
1177 ptr: &'ll Value) -> &'ll Value {
1178 let dest_ptr_ty = self.cx.val_ty(ptr);
1179 let stored_ty = self.cx.val_ty(val);
1180 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
1182 assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
1184 if dest_ptr_ty == stored_ptr_ty {
1187 debug!("Type mismatch in store. \
1188 Expected {:?}, got {:?}; inserting bitcast",
1189 dest_ptr_ty, stored_ptr_ty);
1190 self.bitcast(ptr, stored_ptr_ty)
1194 /// Returns the args that should be used for a call to `llfn`.
1195 fn check_call<'b>(&self,
1198 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1199 let mut fn_ty = self.cx.val_ty(llfn);
1200 // Strip off pointers
1201 while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
1202 fn_ty = self.cx.element_type(fn_ty);
1205 assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
1206 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1208 let param_tys = self.cx.func_params_types(fn_ty);
1210 let all_args_match = param_tys.iter()
1211 .zip(args.iter().map(|&v| self.cx().val_ty(v)))
1212 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
1215 return Cow::Borrowed(args);
1218 let casted_args: Vec<_> = param_tys.into_iter()
1221 .map(|(i, (expected_ty, &actual_val))| {
1222 let actual_ty = self.cx().val_ty(actual_val);
1223 if expected_ty != actual_ty {
1224 debug!("Type mismatch in function call of {:?}. \
1225 Expected {:?} for param {}, got {:?}; injecting bitcast",
1226 llfn, expected_ty, i, actual_ty);
1227 self.bitcast(actual_val, expected_ty)
1234 Cow::Owned(casted_args)
1237 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1238 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1241 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1242 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1245 /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
1246 /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
1247 /// and the intrinsic for `lt` and passes them to `emit`, which is in
1248 /// charge of generating code to call the passed intrinsic on whatever
1249 /// block of generated code is targeted for the intrinsic.
1251 /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
1252 /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
1253 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
1254 if self.cx.sess().opts.optimize == config::OptLevel::No {
1258 let size = size.bytes();
1263 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
1265 let ptr = self.pointercast(ptr, self.cx.type_i8p());
1266 self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
1269 fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
1270 bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
1271 self.count_insn("call");
1273 debug!("Call {:?} with args ({:?})",
1277 let args = self.check_call("call", llfn, args);
1278 let bundle = bundle.map(OperandBundleDef::from_generic);
1279 let bundle = bundle.as_ref().map(|b| &*b.raw);
1282 llvm::LLVMRustBuildCall(
1285 args.as_ptr() as *const &llvm::Value,
1286 args.len() as c_uint,
1292 fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
1293 self.count_insn("zext");
1295 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
1299 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1300 self.count_insn("structgep");
1301 assert_eq!(idx as c_uint as u64, idx);
1303 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1307 fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {