1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{self, False, OperandBundleDef, BasicBlock};
16 use libc::{c_uint, c_char};
17 use rustc::ty::TyCtxt;
18 use rustc::ty::layout::{Align, Size};
19 use rustc::session::{config, Session};
20 use rustc_data_structures::small_c_str::SmallCStr;
21 use traits::{self, BuilderMethods};
28 // All Builders must have an llfn associated with them
// NOTE(review): this listing is elided — the struct's closing brace and
// intervening lines are not visible here.
30 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
// Raw LLVM IRBuilder handle; exclusively owned (&mut) and released in Drop.
31 pub llbuilder: &'ll mut llvm::Builder<'ll>,
// Back-reference to the codegen context that created this builder.
32 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
// Dispose the underlying LLVM builder when the wrapper is dropped, so the
// native allocation cannot leak. (Body elided in this listing.)
35 impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
38 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
43 // This is a really awful way to get a zero-length c-string, but better (and a
44 // lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a static NUL byte, used as the "no name" argument for
// the LLVMBuild* calls below. (Return statement elided in this listing.)
45 fn noname() -> *const c_char {
46 static CNULL: c_char = 0;
// Bitflags controlling how a store/memory op is emitted; combined with `|`.
// NOTE(review): the surrounding macro invocation (presumably `bitflags!`) is
// elided from this listing — confirm against the full file.
51 pub struct MemFlags: u8 {
52 const VOLATILE = 1 << 0;
53 const NONTEMPORAL = 1 << 1;
54 const UNALIGNED = 1 << 2;
// Concrete LLVM implementation of the backend-agnostic BuilderMethods trait.
// (Large parts of the impl are elided from this listing.)
58 impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
59 type Value = &'ll Value;
60 type BasicBlock = &'ll BasicBlock;
61 type Type = &'ll type_::Type;
// new_block (signature partially elided): creates a fresh builder, appends a
// named basic block to the function, and positions the builder at its end.
64 cx: &'a CodegenCx<'ll, 'tcx>,
68 let bx = Builder::with_cx(cx);
70 let name = SmallCStr::new(name);
71 llvm::LLVMAppendBasicBlockInContext(
77 bx.position_at_end(llbb);
81 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
82 // Create a fresh builder from the crate context.
83 let llbuilder = unsafe {
84 llvm::LLVMCreateBuilderInContext(cx.llcx)
// Creates a new block in the same function as `self`, sharing the context.
92 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
93 Builder::new_block(self.cx, self.llfn(), name)
// Session/TyCtxt accessors (bodies elided in this listing).
96 fn sess(&self) -> &Session {
100 fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
// The function containing the current insertion block.
104 fn llfn(&self) -> &'ll Value {
106 llvm::LLVMGetBasicBlockParent(self.llbb())
// The builder's current insertion block.
110 fn llbb(&self) -> &'ll BasicBlock {
112 llvm::LLVMGetInsertBlock(self.llbuilder)
// Instruction-count bookkeeping, gated on -Z codegen-stats flags; the
// per-category counting path is partially elided here.
116 fn count_insn(&self, category: &str) {
117 if self.cx().sess().codegen_stats() {
118 self.cx().stats.borrow_mut().n_llvm_insns += 1;
120 if self.cx().sess().count_llvm_insns() {
124 .entry(category.to_string())
// Sets the IR-level name of a value (debugging aid only).
129 fn set_value_name(&self, value: &'ll Value, name: &str) {
130 let cname = SmallCStr::new(name);
132 llvm::LLVMSetValueName(value, cname.as_ptr());
// Move the insertion point to the end / start of the given block.
136 fn position_at_end(&self, llbb: &'ll BasicBlock) {
138 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
142 fn position_at_start(&self, llbb: &'ll BasicBlock) {
144 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
// Terminator instructions: ret/br/condbr/switch/invoke/unreachable.
// (Signatures and `unsafe` wrappers are partially elided in this listing.)
149 self.count_insn("retvoid");
151 llvm::LLVMBuildRetVoid(self.llbuilder);
155 fn ret(&self, v: &'ll Value) {
156 self.count_insn("ret");
158 llvm::LLVMBuildRet(self.llbuilder, v);
162 fn br(&self, dest: &'ll BasicBlock) {
163 self.count_insn("br");
165 llvm::LLVMBuildBr(self.llbuilder, dest);
172 then_llbb: &'ll BasicBlock,
173 else_llbb: &'ll BasicBlock,
175 self.count_insn("condbr");
177 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
// switch: `else_llbb` is the default target; cases added via add_case below.
184 else_llbb: &'ll BasicBlock,
188 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
// invoke: call with EH edges; args are bitcast-checked first and the generic
// operand bundle is lowered to the raw LLVM representation.
195 then: &'ll BasicBlock,
196 catch: &'ll BasicBlock,
197 bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
198 self.count_insn("invoke");
200 debug!("Invoke {:?} with args ({:?})",
204 let args = self.check_call("invoke", llfn, args);
205 let bundle = bundle.map(OperandBundleDef::from_generic);
206 let bundle = bundle.as_ref().map(|b| &*b.raw);
209 llvm::LLVMRustBuildInvoke(self.llbuilder,
212 args.len() as c_uint,
220 fn unreachable(&self) {
221 self.count_insn("unreachable");
223 llvm::LLVMBuildUnreachable(self.llbuilder);
// Binary/unary arithmetic builders. Each bumps the instruction counter and
// forwards to the matching LLVMBuild* entry point with an empty name.
// The `_fast` variants additionally call LLVMRustSetHasUnsafeAlgebra on the
// result — presumably enabling fast-math on that instruction (confirm in the
// FFI declaration; the return of `instr` is elided in this listing).
228 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
229 self.count_insn("add");
231 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
235 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
236 self.count_insn("fadd");
238 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
242 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
243 self.count_insn("fadd");
245 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
246 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
251 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
252 self.count_insn("sub");
254 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
258 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
259 self.count_insn("fsub");
261 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
265 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
266 self.count_insn("fsub");
268 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
269 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
274 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
275 self.count_insn("mul");
277 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
281 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
282 self.count_insn("fmul");
284 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
288 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
289 self.count_insn("fmul");
291 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
292 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Division/remainder: unsigned, signed, exact, and floating variants.
298 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
299 self.count_insn("udiv");
301 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
305 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
306 self.count_insn("exactudiv");
308 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
312 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
313 self.count_insn("sdiv");
315 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
319 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
320 self.count_insn("exactsdiv");
322 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
326 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
327 self.count_insn("fdiv");
329 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
333 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
334 self.count_insn("fdiv");
336 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
337 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
342 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
343 self.count_insn("urem");
345 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
349 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
350 self.count_insn("srem");
352 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
356 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
357 self.count_insn("frem");
359 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
363 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
364 self.count_insn("frem");
366 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
367 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
// Shifts and bitwise logic.
372 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
373 self.count_insn("shl");
375 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
379 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
380 self.count_insn("lshr");
382 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
386 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
387 self.count_insn("ashr");
389 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
393 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
394 self.count_insn("and");
396 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
400 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
401 self.count_insn("or");
403 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
407 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
408 self.count_insn("xor");
410 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
// Unary negation/complement.
414 fn neg(&self, v: &'ll Value) -> &'ll Value {
415 self.count_insn("neg");
417 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
421 fn fneg(&self, v: &'ll Value) -> &'ll Value {
422 self.count_insn("fneg");
424 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
428 fn not(&self, v: &'ll Value) -> &'ll Value {
429 self.count_insn("not");
431 llvm::LLVMBuildNot(self.llbuilder, v, noname())
// alloca: emits the alloca in the function's entry block (a fresh builder is
// positioned at the first block's start) so it is a static stack slot.
435 fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
436 let bx = Builder::with_cx(self.cx);
437 bx.position_at_start(unsafe {
438 llvm::LLVMGetFirstBasicBlock(self.llfn())
440 bx.dynamic_alloca(ty, name, align)
// dynamic_alloca: emits at the current insertion point; optional name,
// explicit alignment applied afterwards.
443 fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
444 self.count_insn("alloca");
446 let alloca = if name.is_empty() {
447 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
449 let name = SmallCStr::new(name);
450 llvm::LLVMBuildAlloca(self.llbuilder, ty,
453 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// array_alloca: same, but for `len` elements (signature partially elided).
458 fn array_alloca(&self,
462 align: Align) -> &'ll Value {
463 self.count_insn("alloca");
465 let alloca = if name.is_empty() {
466 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
468 let name = SmallCStr::new(name);
469 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
472 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Plain load with explicit alignment.
477 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
478 self.count_insn("load");
480 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
481 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
// Volatile load (no alignment override here).
486 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
487 self.count_insn("load.volatile");
489 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
490 llvm::LLVMSetVolatile(insn, llvm::True);
// Atomic load: generic ordering is translated to the LLVM enum; alignment is
// set to the type size per LLVM's atomic-alignment requirement (see comment).
498 order: traits::AtomicOrdering,
501 self.count_insn("load.atomic");
503 let load = llvm::LLVMRustBuildAtomicLoad(
507 AtomicOrdering::from_generic(order),
509 // LLVM requires the alignment of atomic loads to be at least the size of the type.
510 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
// Attaches !range metadata [start, end) to a load; skipped entirely on amdgpu
// to work around an LLVM assertion (see comment below).
516 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
517 if self.sess().target.target.arch == "amdgpu" {
518 // amdgpu/LLVM does something weird and thinks a i64 value is
519 // split into a v2i32, halving the bitwidth LLVM expects,
520 // tripping an assertion. So, for now, just disable this
526 let llty = val_ty(load);
528 C_uint_big(llty, range.start),
529 C_uint_big(llty, range.end)
532 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
533 llvm::LLVMMDNodeInContext(self.cx.llcx,
// Attaches !nonnull metadata (an empty MD node) to a pointer load.
539 fn nonnull_metadata(&self, load: &'ll Value) {
541 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
542 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// Convenience store with no flags.
546 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
547 self.store_with_flags(val, ptr, align, MemFlags::empty())
// store_with_flags (header elided): bitcast-checks the destination pointer,
// emits the store, then applies alignment (1 if UNALIGNED — the alternate
// arm is elided here), volatility, and !nontemporal metadata per the flags.
557 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
558 self.count_insn("store");
559 let ptr = self.check_store(val, ptr);
561 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
562 let align = if flags.contains(MemFlags::UNALIGNED) {
565 align.abi() as c_uint
567 llvm::LLVMSetAlignment(store, align);
568 if flags.contains(MemFlags::VOLATILE) {
569 llvm::LLVMSetVolatile(store, llvm::True);
571 if flags.contains(MemFlags::NONTEMPORAL) {
572 // According to LLVM [1] building a nontemporal store must
573 // *always* point to a metadata value of the integer 1.
575 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
576 let one = C_i32(self.cx, 1);
577 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
578 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
// Atomic store: mirrors atomic_load — alignment forced to the type size.
584 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
585 order: traits::AtomicOrdering, size: Size) {
586 debug!("Store {:?} -> {:?}", val, ptr);
587 self.count_insn("store.atomic");
588 let ptr = self.check_store(val, ptr);
590 let store = llvm::LLVMRustBuildAtomicStore(
594 AtomicOrdering::from_generic(order),
596 // LLVM requires the alignment of atomic stores to be at least the size of the type.
597 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
// GEP and inbounds-GEP over an index list.
601 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
602 self.count_insn("gep");
604 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
605 indices.len() as c_uint, noname())
609 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
610 self.count_insn("inboundsgep");
612 llvm::LLVMBuildInBoundsGEP(
613 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
// Cast instructions: thin wrappers over the corresponding LLVMBuild* casts.
618 fn trunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
619 self.count_insn("trunc");
621 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
625 fn sext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
626 self.count_insn("sext");
628 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
632 fn fptoui(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
633 self.count_insn("fptoui");
635 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
639 fn fptosi(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
640 self.count_insn("fptosi");
// style nit: missing space after `dest_ty,` below (cosmetic only).
642 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
646 fn uitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
647 self.count_insn("uitofp");
649 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
653 fn sitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
654 self.count_insn("sitofp");
656 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
660 fn fptrunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
661 self.count_insn("fptrunc");
663 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
667 fn fpext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
668 self.count_insn("fpext");
670 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
674 fn ptrtoint(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
675 self.count_insn("ptrtoint");
677 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
681 fn inttoptr(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
682 self.count_insn("inttoptr");
684 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
688 fn bitcast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
689 self.count_insn("bitcast");
691 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
// intcast picks sext/zext/trunc based on `is_signed` inside the Rust shim.
696 fn intcast(&self, val: &'ll Value, dest_ty: Self::Type, is_signed: bool) -> &'ll Value {
697 self.count_insn("intcast");
699 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
703 fn pointercast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
704 self.count_insn("pointercast");
706 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
// Integer/float comparisons; the generic predicate is converted to LLVM's
// enum (icmp explicitly; fcmp via a plain `as c_uint` cast).
711 fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
712 self.count_insn("icmp");
713 let op = llvm::IntPredicate::from_generic(op);
715 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
719 fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
720 self.count_insn("fcmp");
722 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
726 /* Miscellaneous instructions */
// Phi node with no incoming edges yet; edges added via LLVMAddIncoming.
727 fn empty_phi(&self, ty: Self::Type) -> &'ll Value {
728 self.count_insn("emptyphi");
730 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
// Phi with all (value, block) pairs supplied up front; lengths must match.
734 fn phi(&self, ty: Self::Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
735 assert_eq!(vals.len(), bbs.len());
736 let phi = self.empty_phi(ty);
737 self.count_insn("addincoming");
739 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
741 vals.len() as c_uint);
// Inline asm: builds a function type from the input value types, verifies the
// constraint string with LLVM, then emits and calls the asm value. Returns
// None when constraint verification fails (failure arm elided in listing).
746 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
747 inputs: &[&'ll Value], output: Self::Type,
748 volatile: bool, alignstack: bool,
749 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
750 self.count_insn("inlineasm");
752 let volatile = if volatile { llvm::True }
753 else { llvm::False };
754 let alignstack = if alignstack { llvm::True }
755 else { llvm::False };
757 let argtys = inputs.iter().map(|v| {
758 debug!("Asm Input Type: {:?}", *v);
760 }).collect::<Vec<_>>();
762 debug!("Asm Output Type: {:?}", output);
763 let fty = type_::Type::func(&argtys[..], output);
765 // Ask LLVM to verify that the constraints are well-formed.
766 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
767 debug!("Constraint verification result: {:?}", constraints_ok);
769 let v = llvm::LLVMRustInlineAsm(
770 fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
771 Some(self.call(v, inputs, None))
773 // LLVM has detected an issue with our constraints, bail out
// memcpy/memmove intrinsics with per-side alignment; alignments are narrowed
// from u64 to c_uint via `as` (truncation not guarded here).
779 fn memcpy(&self, dst: &'ll Value, dst_align: u64,
780 src: &'ll Value, src_align: u64,
781 size: &'ll Value, is_volatile: bool) -> &'ll Value {
783 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint,
784 src, src_align as c_uint, size, is_volatile)
788 fn memmove(&self, dst: &'ll Value, dst_align: u64,
789 src: &'ll Value, src_align: u64,
790 size: &'ll Value, is_volatile: bool) -> &'ll Value {
792 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint,
793 src, src_align as c_uint, size, is_volatile)
// minnum/maxnum: the shims return Option because the intrinsics only exist
// on LLVM >= 6.0; absence is treated as a fatal invariant violation.
797 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
798 self.count_insn("minnum");
800 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
801 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
804 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
805 self.count_insn("maxnum");
807 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
808 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
// select (fn header elided): cond ? then_val : else_val.
813 &self, cond: &'ll Value,
814 then_val: &'ll Value,
815 else_val: &'ll Value,
817 self.count_insn("select");
819 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
// C-style va_arg extraction.
824 fn va_arg(&self, list: &'ll Value, ty: Self::Type) -> &'ll Value {
825 self.count_insn("vaarg");
827 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
// Vector element access and shuffling.
831 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
832 self.count_insn("extractelement");
834 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
// insert_element (header partially elided).
839 &self, vec: &'ll Value,
843 self.count_insn("insertelement");
845 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
849 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
850 self.count_insn("shufflevector");
852 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
// Splat: insert `elt` at lane 0 of an undef vector, then shuffle with an
// all-zero mask so every lane reads lane 0.
856 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
858 let elt_ty = val_ty(elt);
859 let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64));
860 let vec = self.insert_element(undef, elt, C_i32(self.cx, 0));
861 let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64);
862 self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
// Horizontal vector reductions. The float add/mul variants are fast-math
// only (see the FIXME); fmin/fmax take a NoNaNs flag — false for the plain
// versions, true for the `_fast` versions, which also set unsafe algebra.
866 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
867 self.count_insn("vector.reduce.fadd_fast");
869 // FIXME: add a non-fast math version once
870 // https://bugs.llvm.org/show_bug.cgi?id=36732
872 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
873 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
877 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
878 self.count_insn("vector.reduce.fmul_fast");
880 // FIXME: add a non-fast math version once
881 // https://bugs.llvm.org/show_bug.cgi?id=36732
883 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
884 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
888 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
889 self.count_insn("vector.reduce.add");
890 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
892 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
893 self.count_insn("vector.reduce.mul");
894 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
896 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
897 self.count_insn("vector.reduce.and");
898 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
900 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
901 self.count_insn("vector.reduce.or");
902 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
904 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
905 self.count_insn("vector.reduce.xor");
906 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
908 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
909 self.count_insn("vector.reduce.fmin");
910 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
912 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
913 self.count_insn("vector.reduce.fmax");
914 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
916 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
917 self.count_insn("vector.reduce.fmin_fast");
919 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
920 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
924 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
925 self.count_insn("vector.reduce.fmax_fast");
927 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
928 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
932 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
933 self.count_insn("vector.reduce.min");
934 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
936 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
937 self.count_insn("vector.reduce.max");
938 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
// Aggregate value access: idx is asserted to fit in c_uint before the cast.
941 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
942 self.count_insn("extractvalue");
943 assert_eq!(idx as c_uint as u64, idx);
945 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
949 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
950 idx: u64) -> &'ll Value {
951 self.count_insn("insertvalue");
952 assert_eq!(idx as c_uint as u64, idx);
954 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
// Landing pad for exception handling, reserving `num_clauses` clause slots.
959 fn landing_pad(&self, ty: Self::Type, pers_fn: &'ll Value,
960 num_clauses: usize) -> &'ll Value {
961 self.count_insn("landingpad");
963 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
964 num_clauses as c_uint, noname())
// Landing-pad clause management and resume terminator.
968 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
970 llvm::LLVMAddClause(landing_pad, clause);
974 fn set_cleanup(&self, landing_pad: &'ll Value) {
975 self.count_insn("setcleanup");
977 llvm::LLVMSetCleanup(landing_pad, llvm::True);
981 fn resume(&self, exn: &'ll Value) -> &'ll Value {
982 self.count_insn("resume");
984 llvm::LLVMBuildResume(self.llbuilder, exn)
// Windows-style EH pads (cleanuppad/catchpad/catchswitch). Each Rust shim
// returns Option; None means the linked LLVM lacks support and is fatal.
988 fn cleanup_pad(&self,
989 parent: Option<&'ll Value>,
990 args: &[&'ll Value]) -> &'ll Value {
991 self.count_insn("cleanuppad");
992 let name = const_cstr!("cleanuppad");
994 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
996 args.len() as c_uint,
1000 ret.expect("LLVM does not have support for cleanuppad")
// cleanup_ret (fn header elided): terminates a cleanup pad, optionally
// continuing unwinding at `unwind`.
1004 &self, cleanup: &'ll Value,
1005 unwind: Option<&'ll BasicBlock>,
1007 self.count_insn("cleanupret");
1009 llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
1011 ret.expect("LLVM does not have support for cleanupret")
// catch_pad (fn header elided).
1016 args: &[&'ll Value]) -> &'ll Value {
1017 self.count_insn("catchpad");
1018 let name = const_cstr!("catchpad");
1020 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1021 args.len() as c_uint, args.as_ptr(),
1024 ret.expect("LLVM does not have support for catchpad")
1027 fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
1028 self.count_insn("catchret");
1030 llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
1032 ret.expect("LLVM does not have support for catchret")
// catch_switch (fn header elided): dispatch among `num_handlers` handlers.
1037 parent: Option<&'ll Value>,
1038 unwind: Option<&'ll BasicBlock>,
1039 num_handlers: usize,
1041 self.count_insn("catchswitch");
1042 let name = const_cstr!("catchswitch");
1044 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1045 num_handlers as c_uint,
1048 ret.expect("LLVM does not have support for catchswitch")
1051 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1053 llvm::LLVMRustAddHandler(catch_switch, handler);
// Sets the personality function on the enclosing function, not the builder.
1057 fn set_personality_fn(&self, personality: &'ll Value) {
1059 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1063 // Atomic Operations
// atomic_cmpxchg (fn header elided): success/failure orderings converted from
// the generic enums; `weak` lowered to an LLVM bool.
1069 order: traits::AtomicOrdering,
1070 failure_order: traits::AtomicOrdering,
1073 let weak = if weak { llvm::True } else { llvm::False };
1075 llvm::LLVMRustBuildAtomicCmpXchg(
1080 AtomicOrdering::from_generic(order),
1081 AtomicOrdering::from_generic(failure_order),
// atomic_rmw (fn header elided): read-modify-write with the given binop.
1088 op: traits::AtomicRmwBinOp,
1091 order: traits::AtomicOrdering,
1094 llvm::LLVMBuildAtomicRMW(
1096 AtomicRmwBinOp::from_generic(op),
1099 AtomicOrdering::from_generic(order),
// Fence with the requested ordering and synchronization scope.
1104 fn atomic_fence(&self, order: traits::AtomicOrdering, scope: traits::SynchronizationScope) {
1106 llvm::LLVMRustBuildAtomicFence(
1108 AtomicOrdering::from_generic(order),
1109 SynchronizationScope::from_generic(scope)
// Adds one (value -> dest) case to a switch built earlier.
1114 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1116 llvm::LLVMAddCase(s, on_val, dest)
// Adds a single incoming (val, bb) edge to an existing phi.
1120 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1121 self.count_insn("addincoming");
1123 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
// Tags a load with !invariant.load (empty MD node).
1127 fn set_invariant_load(&self, load: &'ll Value) {
1129 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1130 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
1134 /// Returns the ptr value that should be used for storing `val`.
// If the destination pointer's pointee type does not match the stored value's
// type, bitcast the pointer rather than the value. (The `val` parameter line
// and the matching-arm return are elided in this listing.)
1135 fn check_store<'b>(&self,
1137 ptr: &'ll Value) -> &'ll Value {
1138 let dest_ptr_ty = val_ty(ptr);
1139 let stored_ty = val_ty(val);
1140 let stored_ptr_ty = stored_ty.ptr_to();
1142 assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer);
1144 if dest_ptr_ty == stored_ptr_ty {
1147 debug!("Type mismatch in store. \
1148 Expected {:?}, got {:?}; inserting bitcast",
1149 dest_ptr_ty, stored_ptr_ty);
1150 self.bitcast(ptr, stored_ptr_ty)
1154 /// Returns the args that should be used for a call to `llfn`.
// Strips pointer wrappers off the callee type, asserts it is a function, and
// bitcasts any argument whose type disagrees with the parameter type.
// Borrows the slice unchanged in the all-match fast path.
1155 fn check_call<'b>(&self,
1158 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1159 let mut fn_ty = val_ty(llfn);
1160 // Strip off pointers
1161 while fn_ty.kind() == llvm::TypeKind::Pointer {
1162 fn_ty = fn_ty.element_type();
1165 assert!(fn_ty.kind() == llvm::TypeKind::Function,
1166 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1168 let param_tys = fn_ty.func_params();
1170 let all_args_match = param_tys.iter()
1171 .zip(args.iter().map(|&v| val_ty(v)))
1172 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
1175 return Cow::Borrowed(args);
// Slow path: clone args, injecting a bitcast per mismatched parameter.
1178 let casted_args: Vec<_> = param_tys.into_iter()
1181 .map(|(i, (expected_ty, &actual_val))| {
1182 let actual_ty = val_ty(actual_val);
1183 if expected_ty != actual_ty {
1184 debug!("Type mismatch in function call of {:?}. \
1185 Expected {:?} for param {}, got {:?}; injecting bitcast",
1186 llfn, expected_ty, i, actual_ty);
1187 self.bitcast(actual_val, expected_ty)
1194 Cow::Owned(casted_args)
// Emit llvm.lifetime.start / llvm.lifetime.end markers for a stack slot.
1197 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1198 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1201 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1202 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1205 /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
1206 /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
1207 /// and the intrinsic for `lt` and passes them to `emit`, which is in
1208 /// charge of generating code to call the passed intrinsic on whatever
1209 /// block of generated code is targeted for the intrinsic.
1211 /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
1212 /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
1213 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
// Early-out at -C opt-level=0 (return elided); zero-size early-out also
// elided in this listing.
1214 if self.cx.sess().opts.optimize == config::OptLevel::No {
1218 let size = size.bytes();
1223 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
// The intrinsics take an i8*; cast the pointer and pass the size constant.
1225 let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx));
1226 self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None);
// Direct call: args are type-checked/bitcast via check_call and the generic
// operand bundle lowered to the raw LLVM form, mirroring `invoke`.
1229 fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
1230 bundle: Option<&traits::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
1231 self.count_insn("call");
1233 debug!("Call {:?} with args ({:?})",
1237 let args = self.check_call("call", llfn, args);
1238 let bundle = bundle.map(OperandBundleDef::from_generic);
1239 let bundle = bundle.as_ref().map(|b| &*b.raw);
1242 llvm::LLVMRustBuildCall(
1245 args.as_ptr() as *const &llvm::Value,
1246 args.len() as c_uint,
// Zero-extension cast.
1252 fn zext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
1253 self.count_insn("zext");
1255 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
// Struct-field GEP; idx asserted to fit c_uint before the narrowing cast.
1259 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1260 self.count_insn("structgep");
1261 assert_eq!(idx as c_uint as u64, idx);
1263 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
// Accessor for the codegen context (body elided past end of this listing).
1267 fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {