1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{self, False, OperandBundleDef, BasicBlock};
13 use common::{self, *};
14 use context::CodegenCx;
17 use libc::{c_uint, c_char};
18 use rustc::ty::TyCtxt;
19 use rustc::ty::layout::{Align, Size};
20 use rustc::session::{config, Session};
21 use rustc_data_structures::small_c_str::SmallCStr;
22 use interfaces::{BuilderMethods, Backend, CommonMethods, CommonWriteMethods, TypeMethods};
29 // All Builders must have an llfn associated with them
// Thin wrapper pairing a raw LLVM IRBuilder with the codegen context it
// builds into. `V` defaults to `&'ll Value` (the LLVM value type).
31 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
// Exclusive handle to the underlying LLVM builder; disposed in `Drop`.
32 pub llbuilder: &'ll mut llvm::Builder<'ll>,
// Per-codegen-unit context (types, constants, session, stats).
33 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
// Free the raw LLVM builder when the wrapper goes out of scope, so every
// `Builder` owns exactly one underlying IRBuilder for its lifetime.
36 impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
39 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
44 // This is a really awful way to get a zero-length c-string, but better (and a
45 // lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a static NUL byte, used as the empty instruction
// name expected by the LLVM-C `LLVMBuild*` functions (avoids allocating a
// fresh empty C string per call).
46 fn noname() -> *const c_char {
47 static CNULL: c_char = 0;
// Bitflags modifying how loads/stores are emitted (see `store_with_flags`):
// VOLATILE marks the access volatile, NONTEMPORAL attaches !nontemporal
// metadata, UNALIGNED forces alignment 1.
52 pub struct MemFlags: u8 {
53 const VOLATILE = 1 << 0;
54 const NONTEMPORAL = 1 << 1;
55 const UNALIGNED = 1 << 2;
// Bind the backend-agnostic associated types of the `Backend` trait to
// their concrete LLVM representations for this builder.
59 impl Backend for Builder<'a, 'll, 'tcx> {
60 type Value = &'ll Value;
61 type BasicBlock = &'ll BasicBlock;
62 type Type = &'ll type_::Type;
63 type TypeKind = llvm::TypeKind;
64 type Context = &'ll llvm::Context;
// Implementation of the generic `BuilderMethods` interface in terms of the
// LLVM-C API. Most methods below are thin wrappers that bump an
// instruction-count statistic and forward to one `LLVMBuild*` call.
67 impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
// new_block: create a builder, append a fresh basic block named `name` to
// the given function, and position the builder at its end.
69 cx: &'a CodegenCx<'ll, 'tcx>,
73 let bx = Builder::with_cx(cx);
75 let name = SmallCStr::new(name);
76 llvm::LLVMAppendBasicBlockInContext(
82 bx.position_at_end(llbb);
// Create a builder tied to `cx` but not yet positioned on any block.
86 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
87 // Create a fresh builder from the crate context.
88 let llbuilder = unsafe {
89 llvm::LLVMCreateBuilderInContext(cx.llcx)
// New block appended to the same function this builder is emitting into.
97 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
98 Builder::new_block(self.cx, self.llfn(), name)
101 fn sess(&self) -> &Session {
105 fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
// The function containing the current insertion block.
109 fn llfn(&self) -> &'ll Value {
111 llvm::LLVMGetBasicBlockParent(self.llbb())
// The basic block the builder is currently positioned in.
115 fn llbb(&self) -> &'ll BasicBlock {
117 llvm::LLVMGetInsertBlock(self.llbuilder)
// Bump codegen statistics; only does work when -Z codegen-stats /
// count-llvm-insns are enabled, so this is free in normal builds.
121 fn count_insn(&self, category: &str) {
122 if self.cx().sess().codegen_stats() {
123 self.cx().stats.borrow_mut().n_llvm_insns += 1;
125 if self.cx().sess().count_llvm_insns() {
129 .entry(category.to_string())
134 fn set_value_name(&self, value: &'ll Value, name: &str) {
135 let cname = SmallCStr::new(name);
137 llvm::LLVMSetValueName(value, cname.as_ptr());
// Move the insertion point to the end / start of `llbb`.
141 fn position_at_end(&self, llbb: &'ll BasicBlock) {
143 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
147 fn position_at_start(&self, llbb: &'ll BasicBlock) {
149 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
// --- Terminator instructions: ret, br, condbr, switch, invoke ---
154 self.count_insn("retvoid");
156 llvm::LLVMBuildRetVoid(self.llbuilder);
160 fn ret(&self, v: &'ll Value) {
161 self.count_insn("ret");
163 llvm::LLVMBuildRet(self.llbuilder, v);
167 fn br(&self, dest: &'ll BasicBlock) {
168 self.count_insn("br");
170 llvm::LLVMBuildBr(self.llbuilder, dest);
177 then_llbb: &'ll BasicBlock,
178 else_llbb: &'ll BasicBlock,
180 self.count_insn("condbr");
182 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
// switch: cases are added afterwards via `add_case`.
189 else_llbb: &'ll BasicBlock,
193 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
// invoke: call with unwind edge. Args are type-checked/bitcast via
// `check_call`, and the generic operand bundle is converted to LLVM's form.
200 then: &'ll BasicBlock,
201 catch: &'ll BasicBlock,
202 bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
203 self.count_insn("invoke");
205 debug!("Invoke {:?} with args ({:?})",
209 let args = self.check_call("invoke", llfn, args);
210 let bundle = bundle.map(OperandBundleDef::from_generic);
211 let bundle = bundle.as_ref().map(|b| &*b.raw);
214 llvm::LLVMRustBuildInvoke(self.llbuilder,
217 args.len() as c_uint,
225 fn unreachable(&self) {
226 self.count_insn("unreachable");
228 llvm::LLVMBuildUnreachable(self.llbuilder);
// --- Arithmetic, bitwise and unary instruction wrappers ---
// Each method counts one instruction and forwards to the matching
// `LLVMBuild*`. The `*_fast` float variants additionally call
// `LLVMRustSetHasUnsafeAlgebra` on the result, i.e. they emit the
// instruction with fast-math flags enabled.
233 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
234 self.count_insn("add");
236 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
240 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
241 self.count_insn("fadd");
243 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
247 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
248 self.count_insn("fadd");
250 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
251 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
256 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
257 self.count_insn("sub");
259 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
263 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
264 self.count_insn("fsub");
266 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
270 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
271 self.count_insn("fsub");
273 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
274 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
279 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
280 self.count_insn("mul");
282 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
286 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
287 self.count_insn("fmul");
289 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
293 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
294 self.count_insn("fmul");
296 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
297 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
303 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
304 self.count_insn("udiv");
306 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
// `exact` division: result is poison if the division has a remainder.
310 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
311 self.count_insn("exactudiv");
313 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
317 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
318 self.count_insn("sdiv");
320 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
324 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
325 self.count_insn("exactsdiv");
327 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
331 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
332 self.count_insn("fdiv");
334 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
338 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
339 self.count_insn("fdiv");
341 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
342 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
347 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
348 self.count_insn("urem");
350 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
354 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
355 self.count_insn("srem");
357 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
361 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
362 self.count_insn("frem");
364 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
368 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
369 self.count_insn("frem");
371 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
372 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
377 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
378 self.count_insn("shl");
380 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
384 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
385 self.count_insn("lshr");
387 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
391 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
392 self.count_insn("ashr");
394 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
398 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
399 self.count_insn("and");
401 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
405 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
406 self.count_insn("or");
408 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
412 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
413 self.count_insn("xor");
415 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
419 fn neg(&self, v: &'ll Value) -> &'ll Value {
420 self.count_insn("neg");
422 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
426 fn fneg(&self, v: &'ll Value) -> &'ll Value {
427 self.count_insn("fneg");
429 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
433 fn not(&self, v: &'ll Value) -> &'ll Value {
434 self.count_insn("not");
436 llvm::LLVMBuildNot(self.llbuilder, v, noname())
// alloca: emit a stack slot in the function's *entry* block (a fresh
// builder is positioned at the first basic block) so LLVM's mem2reg can
// promote it; `dynamic_alloca` emits at the current insertion point.
440 fn alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
441 let bx = Builder::with_cx(self.cx);
442 bx.position_at_start(unsafe {
443 llvm::LLVMGetFirstBasicBlock(self.llfn())
445 bx.dynamic_alloca(ty, name, align)
448 fn dynamic_alloca(&self, ty: Self::Type, name: &str, align: Align) -> &'ll Value {
449 self.count_insn("alloca");
// Empty names reuse the static NUL string instead of allocating.
451 let alloca = if name.is_empty() {
452 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
454 let name = SmallCStr::new(name);
455 llvm::LLVMBuildAlloca(self.llbuilder, ty,
458 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Stack array of `len` elements of `ty` with explicit alignment.
463 fn array_alloca(&self,
467 align: Align) -> &'ll Value {
468 self.count_insn("alloca");
470 let alloca = if name.is_empty() {
471 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
473 let name = SmallCStr::new(name);
474 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
477 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Plain load with explicit alignment.
482 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
483 self.count_insn("load");
485 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
486 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
491 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
492 self.count_insn("load.volatile");
494 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
495 llvm::LLVMSetVolatile(insn, llvm::True);
// Atomic load: the generic ordering is translated to LLVM's enum.
503 order: common::AtomicOrdering,
506 self.count_insn("load.atomic");
508 let load = llvm::LLVMRustBuildAtomicLoad(
512 AtomicOrdering::from_generic(order),
514 // LLVM requires the alignment of atomic loads to be at least the size of the type.
515 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
// Attach !range metadata describing the value range of a loaded integer.
521 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
522 if self.sess().target.target.arch == "amdgpu" {
523 // amdgpu/LLVM does something weird and thinks a i64 value is
524 // split into a v2i32, halving the bitwidth LLVM expects,
525 // tripping an assertion. So, for now, just disable this
531 let llty = self.cx.val_ty(load);
533 self.cx.const_uint_big(llty, range.start),
534 self.cx.const_uint_big(llty, range.end)
537 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
538 llvm::LLVMMDNodeInContext(self.cx.llcx,
// Attach !nonnull metadata (empty MD node) to a loaded pointer.
544 fn nonnull_metadata(&self, load: &'ll Value) {
546 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
547 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// Plain store = store_with_flags with no flags set.
551 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
552 self.store_with_flags(val, ptr, align, MemFlags::empty())
// Store with MemFlags: UNALIGNED forces alignment, VOLATILE marks the
// store volatile, NONTEMPORAL attaches !nontemporal metadata.
562 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
563 self.count_insn("store");
// check_store bitcasts `ptr` if its pointee type mismatches `val`'s type.
564 let ptr = self.check_store(val, ptr);
566 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
567 let align = if flags.contains(MemFlags::UNALIGNED) {
570 align.abi() as c_uint
572 llvm::LLVMSetAlignment(store, align);
573 if flags.contains(MemFlags::VOLATILE) {
574 llvm::LLVMSetVolatile(store, llvm::True);
576 if flags.contains(MemFlags::NONTEMPORAL) {
577 // According to LLVM [1] building a nontemporal store must
578 // *always* point to a metadata value of the integer 1.
580 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
581 let one = self.cx.const_i32(1);
582 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
583 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
// Atomic store with the generic ordering translated to LLVM's enum.
589 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
590 order: common::AtomicOrdering, size: Size) {
591 debug!("Store {:?} -> {:?}", val, ptr);
592 self.count_insn("store.atomic");
593 let ptr = self.check_store(val, ptr);
595 let store = llvm::LLVMRustBuildAtomicStore(
599 AtomicOrdering::from_generic(order),
601 // LLVM requires the alignment of atomic stores to be at least the size of the type.
602 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
// getelementptr with explicit index list; `inbounds_gep` adds the
// inbounds keyword (poison on out-of-bounds address arithmetic).
606 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
607 self.count_insn("gep");
609 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
610 indices.len() as c_uint, noname())
614 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
615 self.count_insn("inboundsgep");
617 llvm::LLVMBuildInBoundsGEP(
618 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
// --- Cast instructions: each maps 1:1 onto an LLVM cast opcode ---
623 fn trunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
624 self.count_insn("trunc");
626 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
630 fn sext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
631 self.count_insn("sext");
633 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
637 fn fptoui(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
638 self.count_insn("fptoui");
640 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
644 fn fptosi(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
645 self.count_insn("fptosi");
647 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
651 fn uitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
652 self.count_insn("uitofp");
654 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
658 fn sitofp(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
659 self.count_insn("sitofp");
661 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
665 fn fptrunc(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
666 self.count_insn("fptrunc");
668 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
672 fn fpext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
673 self.count_insn("fpext");
675 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
679 fn ptrtoint(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
680 self.count_insn("ptrtoint");
682 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
686 fn inttoptr(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
687 self.count_insn("inttoptr");
689 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
693 fn bitcast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
694 self.count_insn("bitcast");
696 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
// intcast: picks trunc/sext/zext based on widths and signedness.
701 fn intcast(&self, val: &'ll Value, dest_ty: Self::Type, is_signed: bool) -> &'ll Value {
702 self.count_insn("intcast");
704 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
708 fn pointercast(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
709 self.count_insn("pointercast");
711 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
// Integer / float comparisons. The generic predicate enums are converted
// (or cast) to the LLVM predicate encodings.
716 fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
717 self.count_insn("icmp");
718 let op = llvm::IntPredicate::from_generic(op);
720 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
724 fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
725 self.count_insn("fcmp");
727 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
731 /* Miscellaneous instructions */
// Phi node with no incoming edges; edges are added later
// (see `phi` below and `add_incoming_to_phi`).
732 fn empty_phi(&self, ty: Self::Type) -> &'ll Value {
733 self.count_insn("emptyphi");
735 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
// Phi node with `vals[i]` incoming from `bbs[i]`; slices must match in length.
739 fn phi(&self, ty: Self::Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
740 assert_eq!(vals.len(), bbs.len());
741 let phi = self.empty_phi(ty);
742 self.count_insn("addincoming");
744 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
746 vals.len() as c_uint);
// Emit an inline-asm call. Builds a function type from the input value
// types, asks LLVM to verify the constraint string first, and returns
// None if the constraints are rejected (caller reports the error).
751 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
752 inputs: &[&'ll Value], output: Self::Type,
753 volatile: bool, alignstack: bool,
754 dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
755 self.count_insn("inlineasm");
757 let volatile = if volatile { llvm::True }
758 else { llvm::False };
759 let alignstack = if alignstack { llvm::True }
760 else { llvm::False };
762 let argtys = inputs.iter().map(|v| {
763 debug!("Asm Input Type: {:?}", *v);
765 }).collect::<Vec<_>>();
767 debug!("Asm Output Type: {:?}", output);
768 let fty = &self.cx().func(&argtys[..], output);
770 // Ask LLVM to verify that the constraints are well-formed.
771 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
772 debug!("Constraint verification result: {:?}", constraints_ok);
774 let v = llvm::LLVMRustInlineAsm(
775 fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
776 Some(self.call(v, inputs, None))
778 // LLVM has detected an issue with our constraints, bail out
// memcpy / memmove intrinsic wrappers with per-operand alignments.
784 fn memcpy(&self, dst: &'ll Value, dst_align: u64,
785 src: &'ll Value, src_align: u64,
786 size: &'ll Value, is_volatile: bool) -> &'ll Value {
788 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint,
789 src, src_align as c_uint, size, is_volatile)
793 fn memmove(&self, dst: &'ll Value, dst_align: u64,
794 src: &'ll Value, src_align: u64,
795 size: &'ll Value, is_volatile: bool) -> &'ll Value {
797 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint,
798 src, src_align as c_uint, size, is_volatile)
// minnum/maxnum: panic if built against an LLVM too old to provide the
// intrinsic (the Rust shim returns Option).
802 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
803 self.count_insn("minnum");
805 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
806 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
809 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
810 self.count_insn("maxnum");
812 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
813 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
// select cond ? then_val : else_val
818 &self, cond: &'ll Value,
819 then_val: &'ll Value,
820 else_val: &'ll Value,
822 self.count_insn("select");
824 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
829 fn va_arg(&self, list: &'ll Value, ty: Self::Type) -> &'ll Value {
830 self.count_insn("vaarg");
832 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
// --- Vector element operations ---
836 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
837 self.count_insn("extractelement");
839 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
844 &self, vec: &'ll Value,
848 self.count_insn("insertelement");
850 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
854 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
855 self.count_insn("shufflevector");
857 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
// Broadcast `elt` into all lanes: insert into lane 0 of an undef vector,
// then shuffle with an all-zero mask.
861 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
863 let elt_ty = self.cx.val_ty(elt);
864 let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
865 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
866 let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
867 self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
// --- Horizontal vector reductions ---
// The float `_fast` variants set fast-math flags (and for fmin/fmax pass
// NoNaNs=true); only fast-math float reductions exist until the upstream
// LLVM bug referenced below is fixed.
871 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
872 self.count_insn("vector.reduce.fadd_fast");
874 // FIXME: add a non-fast math version once
875 // https://bugs.llvm.org/show_bug.cgi?id=36732
877 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
878 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
882 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
883 self.count_insn("vector.reduce.fmul_fast");
885 // FIXME: add a non-fast math version once
886 // https://bugs.llvm.org/show_bug.cgi?id=36732
888 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
889 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
893 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
894 self.count_insn("vector.reduce.add");
895 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
897 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
898 self.count_insn("vector.reduce.mul");
899 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
901 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
902 self.count_insn("vector.reduce.and");
903 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
905 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
906 self.count_insn("vector.reduce.or");
907 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
909 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
910 self.count_insn("vector.reduce.xor");
911 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
913 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
914 self.count_insn("vector.reduce.fmin");
915 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
917 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
918 self.count_insn("vector.reduce.fmax");
919 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
921 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
922 self.count_insn("vector.reduce.fmin_fast");
924 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
925 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
929 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
930 self.count_insn("vector.reduce.fmax_fast");
932 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
933 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
937 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
938 self.count_insn("vector.reduce.min");
939 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
941 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
942 self.count_insn("vector.reduce.max");
943 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
// Aggregate value ops. idx is asserted to round-trip through c_uint so
// the narrowing cast cannot silently truncate.
946 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
947 self.count_insn("extractvalue");
948 assert_eq!(idx as c_uint as u64, idx);
950 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
954 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
955 idx: u64) -> &'ll Value {
956 self.count_insn("insertvalue");
957 assert_eq!(idx as c_uint as u64, idx);
959 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
// Landing pad for exception handling; clauses added via `add_clause`.
964 fn landing_pad(&self, ty: Self::Type, pers_fn: &'ll Value,
965 num_clauses: usize) -> &'ll Value {
966 self.count_insn("landingpad");
968 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
969 num_clauses as c_uint, noname())
// --- Exception-handling instructions (landingpad clauses, MSVC-style
// funclet pads). The LLVMRust* pad builders return Option and we panic
// with an explicit message if the linked LLVM lacks support.
973 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
975 llvm::LLVMAddClause(landing_pad, clause);
979 fn set_cleanup(&self, landing_pad: &'ll Value) {
980 self.count_insn("setcleanup");
982 llvm::LLVMSetCleanup(landing_pad, llvm::True);
986 fn resume(&self, exn: &'ll Value) -> &'ll Value {
987 self.count_insn("resume");
989 llvm::LLVMBuildResume(self.llbuilder, exn)
993 fn cleanup_pad(&self,
994 parent: Option<&'ll Value>,
995 args: &[&'ll Value]) -> &'ll Value {
996 self.count_insn("cleanuppad");
997 let name = const_cstr!("cleanuppad");
999 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
1001 args.len() as c_uint,
1005 ret.expect("LLVM does not have support for cleanuppad")
1009 &self, cleanup: &'ll Value,
1010 unwind: Option<&'ll BasicBlock>,
1012 self.count_insn("cleanupret");
1014 llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
1016 ret.expect("LLVM does not have support for cleanupret")
1021 args: &[&'ll Value]) -> &'ll Value {
1022 self.count_insn("catchpad");
1023 let name = const_cstr!("catchpad");
1025 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1026 args.len() as c_uint, args.as_ptr(),
1029 ret.expect("LLVM does not have support for catchpad")
1032 fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
1033 self.count_insn("catchret");
1035 llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
1037 ret.expect("LLVM does not have support for catchret")
// catch_switch: handlers are registered afterwards via `add_handler`.
1042 parent: Option<&'ll Value>,
1043 unwind: Option<&'ll BasicBlock>,
1044 num_handlers: usize,
1046 self.count_insn("catchswitch");
1047 let name = const_cstr!("catchswitch");
1049 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1050 num_handlers as c_uint,
1053 ret.expect("LLVM does not have support for catchswitch")
1056 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1058 llvm::LLVMRustAddHandler(catch_switch, handler);
1062 fn set_personality_fn(&self, personality: &'ll Value) {
1064 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1068 // Atomic Operations
// cmpxchg: generic orderings/weak flag are translated to LLVM's encodings.
1074 order: common::AtomicOrdering,
1075 failure_order: common::AtomicOrdering,
1078 let weak = if weak { llvm::True } else { llvm::False };
1080 llvm::LLVMRustBuildAtomicCmpXchg(
1085 AtomicOrdering::from_generic(order),
1086 AtomicOrdering::from_generic(failure_order),
// atomicrmw with the generic binop translated to LLVM's enum.
1093 op: common::AtomicRmwBinOp,
1096 order: common::AtomicOrdering,
1099 llvm::LLVMBuildAtomicRMW(
1101 AtomicRmwBinOp::from_generic(op),
1104 AtomicOrdering::from_generic(order),
1109 fn atomic_fence(&self, order: common::AtomicOrdering, scope: common::SynchronizationScope) {
1111 llvm::LLVMRustBuildAtomicFence(
1113 AtomicOrdering::from_generic(order),
1114 SynchronizationScope::from_generic(scope)
// Append one case arm to a previously built `switch`.
1119 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1121 llvm::LLVMAddCase(s, on_val, dest)
// Append a single incoming (value, block) edge to a phi node.
1125 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1126 self.count_insn("addincoming");
1128 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
// Attach !invariant.load metadata (empty MD node) to a load.
1132 fn set_invariant_load(&self, load: &'ll Value) {
1134 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1135 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
1139 /// Returns the ptr value that should be used for storing `val`.
// If the destination pointer's pointee type does not match `val`'s type,
// a bitcast of the pointer is inserted (with a debug! note) rather than
// failing — keeps stores through mistyped pointers well-formed.
1140 fn check_store<'b>(&self,
1142 ptr: &'ll Value) -> &'ll Value {
1143 let dest_ptr_ty = self.cx.val_ty(ptr);
1144 let stored_ty = self.cx.val_ty(val);
1145 let stored_ptr_ty = self.cx.ptr_to(stored_ty);
1147 assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer);
1149 if dest_ptr_ty == stored_ptr_ty {
1152 debug!("Type mismatch in store. \
1153 Expected {:?}, got {:?}; inserting bitcast",
1154 dest_ptr_ty, stored_ptr_ty);
1155 self.bitcast(ptr, stored_ptr_ty)
1159 /// Returns the args that should be used for a call to `llfn`.
// Compares each argument's type against the callee's declared parameter
// types; returns the slice borrowed when all match, otherwise an owned
// vector with bitcasts injected only for the mismatched arguments.
1160 fn check_call<'b>(&self,
1163 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1164 let mut fn_ty = self.cx.val_ty(llfn);
1165 // Strip off pointers
1166 while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer {
1167 fn_ty = self.cx.element_type(fn_ty);
1170 assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function,
1171 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1173 let param_tys = self.cx.func_params(fn_ty);
1175 let all_args_match = param_tys.iter()
1176 .zip(args.iter().map(|&v| self.cx().val_ty(v)))
1177 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
// Fast path: no allocation when everything already matches.
1180 return Cow::Borrowed(args);
1183 let casted_args: Vec<_> = param_tys.into_iter()
1186 .map(|(i, (expected_ty, &actual_val))| {
1187 let actual_ty = self.cx().val_ty(actual_val);
1188 if expected_ty != actual_ty {
1189 debug!("Type mismatch in function call of {:?}. \
1190 Expected {:?} for param {}, got {:?}; injecting bitcast",
1191 llfn, expected_ty, i, actual_ty);
1192 self.bitcast(actual_val, expected_ty)
1199 Cow::Owned(casted_args)
// Emit llvm.lifetime.start / .end markers around a stack slot so LLVM can
// reuse its storage; both delegate to `call_lifetime_intrinsic`.
1202 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1203 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1206 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1207 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1210 /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
1211 /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
1212 /// and the intrinsic for `lt` and passes them to `emit`, which is in
1213 /// charge of generating code to call the passed intrinsic on whatever
1214 /// block of generated code is targeted for the intrinsic.
1216 /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
1217 /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
1218 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
// Lifetime markers only matter to the optimizer; skip at -O0.
1219 if self.cx.sess().opts.optimize == config::OptLevel::No {
1223 let size = size.bytes();
1228 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
// The intrinsic takes an i8*; cast the pointer before the call.
1230 let ptr = self.pointercast(ptr, self.cx.i8p());
1231 self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
// call: like `invoke` but without an unwind edge. Arguments are
// type-checked/bitcast via `check_call`; the generic operand bundle is
// converted to LLVM's representation.
1234 fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
1235 bundle: Option<&common::OperandBundleDef<'ll, &'ll Value>>) -> &'ll Value {
1236 self.count_insn("call");
1238 debug!("Call {:?} with args ({:?})",
1242 let args = self.check_call("call", llfn, args);
1243 let bundle = bundle.map(OperandBundleDef::from_generic);
1244 let bundle = bundle.as_ref().map(|b| &*b.raw);
1247 llvm::LLVMRustBuildCall(
1250 args.as_ptr() as *const &llvm::Value,
1251 args.len() as c_uint,
1257 fn zext(&self, val: &'ll Value, dest_ty: Self::Type) -> &'ll Value {
1258 self.count_insn("zext");
1260 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
// GEP to struct field `idx`; assert guards the u64 -> c_uint narrowing.
1264 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1265 self.count_insn("structgep");
1266 assert_eq!(idx as c_uint as u64, idx);
1268 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1272 fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {