// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{self, False, OperandBundleDef, BasicBlock};
13 use common::{self, *};
14 use context::CodegenCx;
17 use libc::{c_uint, c_char};
18 use rustc::ty::TyCtxt;
19 use rustc::ty::layout::{Align, Size};
20 use rustc::session::{config, Session};
21 use rustc_data_structures::small_c_str::SmallCStr;
22 use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods};
29 // All Builders must have an llfn associated with them
31 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
32 pub llbuilder: &'ll mut llvm::Builder<'ll>,
33 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
36 impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
39 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
44 // This is a really awful way to get a zero-length c-string, but better (and a
45 // lot more efficient) than doing str::as_c_str("", ...) every time.
46 fn noname() -> *const c_char {
47 static CNULL: c_char = 0;
52 pub struct MemFlags: u8 {
53 const VOLATILE = 1 << 0;
54 const NONTEMPORAL = 1 << 1;
55 const UNALIGNED = 1 << 2;
59 impl Backend for Builder<'a, 'll, 'tcx> {
60 type Value = &'ll Value;
61 type BasicBlock = &'ll BasicBlock;
62 type Type = &'ll type_::Type;
63 type Context = &'ll llvm::Context;
impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
    /// Creates a builder positioned at the end of a freshly appended
    /// basic block named `name` in function `llfn`.
    fn new_block<'b>(
        cx: &'a CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        name: &'b str,
    ) -> Self {
        let bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(
                cx.llcx,
                llfn,
                name.as_ptr()
            )
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe {
            llvm::LLVMCreateBuilderInContext(cx.llcx)
        };
        Builder {
            llbuilder,
            cx,
        }
    }

    /// New block in the same function as `self`, sharing the context.
    fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn sess(&self) -> &Session {
        self.cx.sess()
    }

    fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
        self.cx.tcx
    }

    /// The function containing the builder's current insertion block.
    fn llfn(&self) -> &'ll Value {
        unsafe {
            llvm::LLVMGetBasicBlockParent(self.llbb())
        }
    }

    /// The builder's current insertion block.
    fn llbb(&self) -> &'ll BasicBlock {
        unsafe {
            llvm::LLVMGetInsertBlock(self.llbuilder)
        }
    }

    /// Bumps the -Z codegen-stats instruction counters for `category`.
    fn count_insn(&self, category: &str) {
        if self.cx().sess().codegen_stats() {
            self.cx().stats.borrow_mut().n_llvm_insns += 1;
        }
        if self.cx().sess().count_llvm_insns() {
            *self.cx().stats
                      .borrow_mut()
                      .llvm_insns
                      .entry(category.to_string())
                      .or_insert(0) += 1;
        }
    }

    fn set_value_name(&self, value: &'ll Value, name: &str) {
        let cname = SmallCStr::new(name);
        unsafe {
            llvm::LLVMSetValueName(value, cname.as_ptr());
        }
    }

    fn position_at_end(&self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn position_at_start(&self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }
153 self.count_insn("retvoid");
155 llvm::LLVMBuildRetVoid(self.llbuilder);
159 fn ret(&self, v: &'ll Value) {
160 self.count_insn("ret");
162 llvm::LLVMBuildRet(self.llbuilder, v);
166 fn br(&self, dest: &'ll BasicBlock) {
167 self.count_insn("br");
169 llvm::LLVMBuildBr(self.llbuilder, dest);
176 then_llbb: &'ll BasicBlock,
177 else_llbb: &'ll BasicBlock,
179 self.count_insn("condbr");
181 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
188 else_llbb: &'ll BasicBlock,
192 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
199 then: &'ll BasicBlock,
200 catch: &'ll BasicBlock,
201 bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
202 self.count_insn("invoke");
204 debug!("Invoke {:?} with args ({:?})",
208 let args = self.check_call("invoke", llfn, args);
209 let bundle = bundle.map(OperandBundleDef::from_generic);
210 let bundle = bundle.as_ref().map(|b| &*b.raw);
213 llvm::LLVMRustBuildInvoke(self.llbuilder,
216 args.len() as c_uint,
224 fn unreachable(&self) {
225 self.count_insn("unreachable");
227 llvm::LLVMBuildUnreachable(self.llbuilder);
232 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
233 self.count_insn("add");
235 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
239 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
240 self.count_insn("fadd");
242 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
246 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
247 self.count_insn("fadd");
249 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
250 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
255 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
256 self.count_insn("sub");
258 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
262 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
263 self.count_insn("fsub");
265 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
269 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
270 self.count_insn("fsub");
272 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
273 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
278 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
279 self.count_insn("mul");
281 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
285 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
286 self.count_insn("fmul");
288 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
292 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
293 self.count_insn("fmul");
295 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
296 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
302 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
303 self.count_insn("udiv");
305 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
309 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
310 self.count_insn("exactudiv");
312 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
316 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
317 self.count_insn("sdiv");
319 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
323 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
324 self.count_insn("exactsdiv");
326 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
330 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
331 self.count_insn("fdiv");
333 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
337 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
338 self.count_insn("fdiv");
340 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
341 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
346 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
347 self.count_insn("urem");
349 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
353 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
354 self.count_insn("srem");
356 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
360 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
361 self.count_insn("frem");
363 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
367 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
368 self.count_insn("frem");
370 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
371 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
376 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
377 self.count_insn("shl");
379 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
383 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
384 self.count_insn("lshr");
386 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
390 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
391 self.count_insn("ashr");
393 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
397 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
398 self.count_insn("and");
400 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
404 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
405 self.count_insn("or");
407 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
411 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
412 self.count_insn("xor");
414 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
418 fn neg(&self, v: &'ll Value) -> &'ll Value {
419 self.count_insn("neg");
421 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
425 fn fneg(&self, v: &'ll Value) -> &'ll Value {
426 self.count_insn("fneg");
428 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
432 fn not(&self, v: &'ll Value) -> &'ll Value {
433 self.count_insn("not");
435 llvm::LLVMBuildNot(self.llbuilder, v, noname())
439 fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
440 let bx = Builder::with_cx(self.cx);
441 bx.position_at_start(unsafe {
442 llvm::LLVMGetFirstBasicBlock(self.llfn())
444 bx.dynamic_alloca(ty, name, align)
447 fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
448 self.count_insn("alloca");
450 let alloca = if name.is_empty() {
451 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
453 let name = SmallCStr::new(name);
454 llvm::LLVMBuildAlloca(self.llbuilder, ty,
457 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
462 fn array_alloca(&self,
466 align: Align) -> &'ll Value {
467 self.count_insn("alloca");
469 let alloca = if name.is_empty() {
470 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
472 let name = SmallCStr::new(name);
473 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
476 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
481 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
482 self.count_insn("load");
484 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
485 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
490 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
491 self.count_insn("load.volatile");
493 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
494 llvm::LLVMSetVolatile(insn, llvm::True);
502 order: common::AtomicOrdering,
505 self.count_insn("load.atomic");
507 let load = llvm::LLVMRustBuildAtomicLoad(
511 AtomicOrdering::from_generic(order),
513 // LLVM requires the alignment of atomic loads to be at least the size of the type.
514 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
520 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
521 if self.sess().target.target.arch == "amdgpu" {
522 // amdgpu/LLVM does something weird and thinks a i64 value is
523 // split into a v2i32, halving the bitwidth LLVM expects,
524 // tripping an assertion. So, for now, just disable this
530 let llty = self.cx.val_ty(load);
532 self.cx.c_uint_big(llty, range.start),
533 self.cx.c_uint_big(llty, range.end)
536 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
537 llvm::LLVMMDNodeInContext(self.cx.llcx,
543 fn nonnull_metadata(&self, load: &'ll Value) {
545 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
546 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
550 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
551 self.store_with_flags(val, ptr, align, MemFlags::empty())
561 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
562 self.count_insn("store");
563 let ptr = self.check_store(val, ptr);
565 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
566 let align = if flags.contains(MemFlags::UNALIGNED) {
569 align.abi() as c_uint
571 llvm::LLVMSetAlignment(store, align);
572 if flags.contains(MemFlags::VOLATILE) {
573 llvm::LLVMSetVolatile(store, llvm::True);
575 if flags.contains(MemFlags::NONTEMPORAL) {
576 // According to LLVM [1] building a nontemporal store must
577 // *always* point to a metadata value of the integer 1.
579 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
580 let one = self.cx.c_i32(1);
581 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
582 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
588 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
589 order: common::AtomicOrdering, size: Size) {
590 debug!("Store {:?} -> {:?}", val, ptr);
591 self.count_insn("store.atomic");
592 let ptr = self.check_store(val, ptr);
594 let store = llvm::LLVMRustBuildAtomicStore(
598 AtomicOrdering::from_generic(order),
600 // LLVM requires the alignment of atomic stores to be at least the size of the type.
601 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
605 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
606 self.count_insn("gep");
608 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
609 indices.len() as c_uint, noname())
613 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
614 self.count_insn("inboundsgep");
616 llvm::LLVMBuildInBoundsGEP(
617 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
622 fn trunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
623 self.count_insn("trunc");
625 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
629 fn sext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
630 self.count_insn("sext");
632 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
636 fn fptoui(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
637 self.count_insn("fptoui");
639 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
643 fn fptosi(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
644 self.count_insn("fptosi");
646 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
650 fn uitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
651 self.count_insn("uitofp");
653 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
657 fn sitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
658 self.count_insn("sitofp");
660 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
664 fn fptrunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
665 self.count_insn("fptrunc");
667 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
671 fn fpext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
672 self.count_insn("fpext");
674 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
678 fn ptrtoint(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
679 self.count_insn("ptrtoint");
681 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
685 fn inttoptr(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
686 self.count_insn("inttoptr");
688 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
692 fn bitcast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
693 self.count_insn("bitcast");
695 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
700 fn intcast(&self, val: &'ll Value, dest_ty: Self::Type, is_signed: bool) -> &'ll Value {
701 self.count_insn("intcast");
703 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
707 fn pointercast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
708 self.count_insn("pointercast");
710 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
715 fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
716 self.count_insn("icmp");
717 let op = llvm::IntPredicate::from_generic(op);
719 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
723 fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
724 self.count_insn("fcmp");
726 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
730 /* Miscellaneous instructions */
731 fn empty_phi(&self, ty: Self::Type) -> &'ll Value {
732 self.count_insn("emptyphi");
734 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
738 fn phi(&self, ty: Self::Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
739 assert_eq!(vals.len(), bbs.len());
740 let phi = self.empty_phi(ty);
741 self.count_insn("addincoming");
743 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
745 vals.len() as c_uint);
750 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
751 inputs: &[&'ll Value], output: Self::Type,
752 volatile: bool, alignstack: bool,
753 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
754 self.count_insn("inlineasm");
756 let volatile = if volatile { llvm::True }
757 else { llvm::False };
758 let alignstack = if alignstack { llvm::True }
759 else { llvm::False };
761 let argtys = inputs.iter().map(|v| {
762 debug!("Asm Input Type: {:?}", *v);
764 }).collect::<Vec<_>>();
766 debug!("Asm Output Type: {:?}", output);
767 let fty = type_::Type::func(&argtys[..], output);
769 // Ask LLVM to verify that the constraints are well-formed.
770 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
771 debug!("Constraint verification result: {:?}", constraints_ok);
773 let v = llvm::LLVMRustInlineAsm(
774 fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
775 Some(self.call(v, inputs, None))
777 // LLVM has detected an issue with our constraints, bail out
783 fn memcpy(&self, dst: &'ll Value, dst_align: u64,
784 src: &'ll Value, src_align: u64,
785 size: &'ll Value, is_volatile: bool) -> &'ll Value {
787 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint,
788 src, src_align as c_uint, size, is_volatile)
792 fn memmove(&self, dst: &'ll Value, dst_align: u64,
793 src: &'ll Value, src_align: u64,
794 size: &'ll Value, is_volatile: bool) -> &'ll Value {
796 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint,
797 src, src_align as c_uint, size, is_volatile)
801 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
802 self.count_insn("minnum");
804 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
805 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
808 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
809 self.count_insn("maxnum");
811 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
812 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
817 &self, cond: &'ll Value,
818 then_val: &'ll Value,
819 else_val: &'ll Value,
821 self.count_insn("select");
823 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
828 fn va_arg(&self, list: &'ll Value, ty: Self::Type) -> &'ll Value {
829 self.count_insn("vaarg");
831 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
835 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
836 self.count_insn("extractelement");
838 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
843 &self, vec: &'ll Value,
847 self.count_insn("insertelement");
849 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
853 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
854 self.count_insn("shufflevector");
856 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
860 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
862 let elt_ty = self.cx.val_ty(elt);
863 let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64));
864 let vec = self.insert_element(undef, elt, self.cx.c_i32(0));
865 let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64);
866 self.shuffle_vector(vec, undef, self.cx.c_null(vec_i32_ty))
870 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
871 self.count_insn("vector.reduce.fadd_fast");
873 // FIXME: add a non-fast math version once
874 // https://bugs.llvm.org/show_bug.cgi?id=36732
876 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
877 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
881 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
882 self.count_insn("vector.reduce.fmul_fast");
884 // FIXME: add a non-fast math version once
885 // https://bugs.llvm.org/show_bug.cgi?id=36732
887 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
888 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
892 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
893 self.count_insn("vector.reduce.add");
894 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
896 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
897 self.count_insn("vector.reduce.mul");
898 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
900 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
901 self.count_insn("vector.reduce.and");
902 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
904 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
905 self.count_insn("vector.reduce.or");
906 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
908 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
909 self.count_insn("vector.reduce.xor");
910 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
912 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
913 self.count_insn("vector.reduce.fmin");
914 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
916 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
917 self.count_insn("vector.reduce.fmax");
918 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
920 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
921 self.count_insn("vector.reduce.fmin_fast");
923 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
924 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
928 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
929 self.count_insn("vector.reduce.fmax_fast");
931 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
932 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
936 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
937 self.count_insn("vector.reduce.min");
938 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
940 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
941 self.count_insn("vector.reduce.max");
942 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
945 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
946 self.count_insn("extractvalue");
947 assert_eq!(idx as c_uint as u64, idx);
949 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
953 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
954 idx: u64) -> &'ll Value {
955 self.count_insn("insertvalue");
956 assert_eq!(idx as c_uint as u64, idx);
958 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
963 fn landing_pad(&self, ty: Self::Type, pers_fn: &'ll Value,
964 num_clauses: usize) -> &'ll Value {
965 self.count_insn("landingpad");
967 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
968 num_clauses as c_uint, noname())
972 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
974 llvm::LLVMAddClause(landing_pad, clause);
978 fn set_cleanup(&self, landing_pad: &'ll Value) {
979 self.count_insn("setcleanup");
981 llvm::LLVMSetCleanup(landing_pad, llvm::True);
985 fn resume(&self, exn: &'ll Value) -> &'ll Value {
986 self.count_insn("resume");
988 llvm::LLVMBuildResume(self.llbuilder, exn)
992 fn cleanup_pad(&self,
993 parent: Option<&'ll Value>,
994 args: &[&'ll Value]) -> &'ll Value {
995 self.count_insn("cleanuppad");
996 let name = const_cstr!("cleanuppad");
998 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
1000 args.len() as c_uint,
1004 ret.expect("LLVM does not have support for cleanuppad")
1008 &self, cleanup: &'ll Value,
1009 unwind: Option<&'ll BasicBlock>,
1011 self.count_insn("cleanupret");
1013 llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
1015 ret.expect("LLVM does not have support for cleanupret")
1020 args: &[&'ll Value]) -> &'ll Value {
1021 self.count_insn("catchpad");
1022 let name = const_cstr!("catchpad");
1024 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1025 args.len() as c_uint, args.as_ptr(),
1028 ret.expect("LLVM does not have support for catchpad")
1031 fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
1032 self.count_insn("catchret");
1034 llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
1036 ret.expect("LLVM does not have support for catchret")
1041 parent: Option<&'ll Value>,
1042 unwind: Option<&'ll BasicBlock>,
1043 num_handlers: usize,
1045 self.count_insn("catchswitch");
1046 let name = const_cstr!("catchswitch");
1048 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1049 num_handlers as c_uint,
1052 ret.expect("LLVM does not have support for catchswitch")
1055 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1057 llvm::LLVMRustAddHandler(catch_switch, handler);
1061 fn set_personality_fn(&self, personality: &'ll Value) {
1063 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1067 // Atomic Operations
1073 order: common::AtomicOrdering,
1074 failure_order: common::AtomicOrdering,
1077 let weak = if weak { llvm::True } else { llvm::False };
1079 llvm::LLVMRustBuildAtomicCmpXchg(
1084 AtomicOrdering::from_generic(order),
1085 AtomicOrdering::from_generic(failure_order),
1092 op: common::AtomicRmwBinOp,
1095 order: common::AtomicOrdering,
1098 llvm::LLVMBuildAtomicRMW(
1100 AtomicRmwBinOp::from_generic(op),
1103 AtomicOrdering::from_generic(order),
1108 fn atomic_fence(&self, order: common::AtomicOrdering, scope: common::SynchronizationScope) {
1110 llvm::LLVMRustBuildAtomicFence(
1112 AtomicOrdering::from_generic(order),
1113 SynchronizationScope::from_generic(scope)
1118 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1120 llvm::LLVMAddCase(s, on_val, dest)
1124 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1125 self.count_insn("addincoming");
1127 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
1131 fn set_invariant_load(&self, load: &'ll Value) {
1133 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1134 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
1138 /// Returns the ptr value that should be used for storing `val`.
1139 fn check_store<'b>(&self,
1141 ptr: &'ll Value) -> &'ll Value {
1142 let dest_ptr_ty = self.cx.val_ty(ptr);
1143 let stored_ty = self.cx.val_ty(val);
1144 let stored_ptr_ty = stored_ty.ptr_to();
1146 assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer);
1148 if dest_ptr_ty == stored_ptr_ty {
1151 debug!("Type mismatch in store. \
1152 Expected {:?}, got {:?}; inserting bitcast",
1153 dest_ptr_ty, stored_ptr_ty);
1154 self.bitcast(ptr, stored_ptr_ty)
1158 /// Returns the args that should be used for a call to `llfn`.
1159 fn check_call<'b>(&self,
1162 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1163 let mut fn_ty = self.cx.val_ty(llfn);
1164 // Strip off pointers
1165 while fn_ty.kind() == llvm::TypeKind::Pointer {
1166 fn_ty = fn_ty.element_type();
1169 assert!(fn_ty.kind() == llvm::TypeKind::Function,
1170 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1172 let param_tys = fn_ty.func_params();
1174 let all_args_match = param_tys.iter()
1175 .zip(args.iter().map(|&v| self.cx().val_ty(v)))
1176 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
1179 return Cow::Borrowed(args);
1182 let casted_args: Vec<_> = param_tys.into_iter()
1185 .map(|(i, (expected_ty, &actual_val))| {
1186 let actual_ty = self.cx().val_ty(actual_val);
1187 if expected_ty != actual_ty {
1188 debug!("Type mismatch in function call of {:?}. \
1189 Expected {:?} for param {}, got {:?}; injecting bitcast",
1190 llfn, expected_ty, i, actual_ty);
1191 self.bitcast(actual_val, expected_ty)
1198 Cow::Owned(casted_args)
1201 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1202 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1205 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1206 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1209 /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
1210 /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
1211 /// and the intrinsic for `lt` and passes them to `emit`, which is in
1212 /// charge of generating code to call the passed intrinsic on whatever
1213 /// block of generated code is targeted for the intrinsic.
1215 /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
1216 /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
1217 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
1218 if self.cx.sess().opts.optimize == config::OptLevel::No {
1222 let size = size.bytes();
1227 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
1229 let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx));
1230 self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None);
1233 fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
1234 bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
1235 self.count_insn("call");
1237 debug!("Call {:?} with args ({:?})",
1241 let args = self.check_call("call", llfn, args);
1242 let bundle = bundle.map(OperandBundleDef::from_generic);
1243 let bundle = bundle.as_ref().map(|b| &*b.raw);
1246 llvm::LLVMRustBuildCall(
1249 args.as_ptr() as *const &llvm::Value,
1250 args.len() as c_uint,
1256 fn zext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
1257 self.count_insn("zext");
1259 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
1263 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1264 self.count_insn("structgep");
1265 assert_eq!(idx as c_uint as u64, idx);
1267 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1271 fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {