1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
12 use llvm::{False, OperandBundleDef};
13 use llvm::{self, BasicBlock};
17 use libc::{c_uint, c_char};
18 use rustc::ty::TyCtxt;
19 use rustc::ty::layout::{Align, Size};
20 use rustc::session::{config, Session};
21 use rustc_data_structures::small_c_str::SmallCStr;
22 use traits::{self, BuilderMethods};
28 // All Builders must have an llfn associated with them
// Thin wrapper around an LLVM `IRBuilder`: owns the raw builder handle plus a
// back-reference to the codegen context. `V` defaults to `&'ll Value`, the
// LLVM value type used by this backend.
// NOTE(review): this listing has line-number gaps — the struct's closing
// brace is not visible in this extract.
30 pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
// Raw LLVM builder; disposed in the `Drop` impl below.
31 pub llbuilder: &'ll mut llvm::Builder<'ll>,
32 pub cx: &'a CodegenCx<'ll, 'tcx, V>,
// Release the FFI-owned LLVM builder handle when the wrapper is dropped so it
// does not leak.
35 impl<V> Drop for Builder<'_, '_, '_, V> {
// NOTE(review): the `fn drop(&mut self)` signature and `unsafe` block lines
// are missing from this extract; the FFI call below must run inside them.
38 llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
43 // This is a really awful way to get a zero-length c-string, but better (and a
44 // lot more efficient) than doing str::as_c_str("", ...) every time.
// Returns a pointer to a static NUL byte, used as the "no name" argument for
// the many LLVMBuild* calls below.
45 fn noname() -> *const c_char {
46 static CNULL: c_char = 0;
// NOTE(review): the `&CNULL` return expression and closing brace are missing
// from this extract.
// Flags modifying how a load/store is emitted; combined bitwise. The
// `const NAME = 1 << n;` form suggests this sits inside a `bitflags!`
// invocation — NOTE(review): the macro wrapper lines are missing from this
// extract, confirm against the full file.
51 pub struct MemFlags: u8 {
52 const VOLATILE = 1 << 0;
53 const NONTEMPORAL = 1 << 1;
54 const UNALIGNED = 1 << 2;
// Implementation of the backend-agnostic `BuilderMethods` trait for the LLVM
// builder. NOTE(review): this entire impl is shown with line gaps — many
// `unsafe` openers, braces, and statements are not visible.
58 impl BuilderMethods<'a, 'll, 'tcx, Value, BasicBlock>
59 for Builder<'a, 'll, 'tcx> {
// new_block: create a fresh basic block named `name` on `llfn` and return a
// builder positioned at its end.
61 cx: &'a CodegenCx<'ll, 'tcx>,
65 let bx = Builder::with_cx(cx);
67 let name = SmallCStr::new(name);
68 llvm::LLVMAppendBasicBlockInContext(
74 bx.position_at_end(llbb);
78 fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
79 // Create a fresh builder from the crate context.
80 let llbuilder = unsafe {
81 llvm::LLVMCreateBuilderInContext(cx.llcx)
// build_sibling_block: a new block on the same function this builder is
// currently inside of (see `llfn` below).
89 fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
90 Builder::new_block(self.cx, self.llfn(), name)
// Convenience accessors delegating to the codegen context / LLVM.
93 fn sess(&self) -> &Session {
97 fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
// llfn: the function containing the builder's current insertion block.
101 fn llfn(&self) -> &'ll Value {
103 llvm::LLVMGetBasicBlockParent(self.llbb())
// llbb: the basic block the builder is currently inserting into.
107 fn llbb(&self) -> &'ll BasicBlock {
109 llvm::LLVMGetInsertBlock(self.llbuilder)
// count_insn: bump per-category instruction counters when -Z codegen-stats /
// count-llvm-insns are enabled; called by every emit method below.
113 fn count_insn(&self, category: &str) {
114 if self.cx().sess().codegen_stats() {
115 self.cx().stats.borrow_mut().n_llvm_insns += 1;
117 if self.cx().sess().count_llvm_insns() {
121 .entry(category.to_string())
126 fn set_value_name(&self, value: &'ll Value, name: &str) {
127 let cname = SmallCStr::new(name);
129 llvm::LLVMSetValueName(value, cname.as_ptr());
// Move the insertion point to the end / start of `llbb`.
133 fn position_at_end(&self, llbb: &'ll BasicBlock) {
135 llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
139 fn position_at_start(&self, llbb: &'ll BasicBlock) {
141 llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
// Terminator instructions: ret/br/condbr/switch/invoke/unreachable. Each one
// counts itself and forwards to the corresponding LLVMBuild* call.
// (ret_void — signature line missing from this extract.)
146 self.count_insn("retvoid");
148 llvm::LLVMBuildRetVoid(self.llbuilder);
152 fn ret(&self, v: &'ll Value) {
153 self.count_insn("ret");
155 llvm::LLVMBuildRet(self.llbuilder, v);
159 fn br(&self, dest: &'ll BasicBlock) {
160 self.count_insn("br");
162 llvm::LLVMBuildBr(self.llbuilder, dest);
// cond_br (signature lines partially missing).
169 then_llbb: &'ll BasicBlock,
170 else_llbb: &'ll BasicBlock,
172 self.count_insn("condbr");
174 llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
// switch: `else_llbb` is the default target; cases are attached later via
// `add_case`.
181 else_llbb: &'ll BasicBlock,
185 llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
// invoke: call with unwind edge; args are type-checked/bitcast via
// `check_call` first, same as `call` below.
192 then: &'ll BasicBlock,
193 catch: &'ll BasicBlock,
194 bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value {
195 self.count_insn("invoke");
197 debug!("Invoke {:?} with args ({:?})",
201 let args = self.check_call("invoke", llfn, args);
202 let bundle = bundle.map(|b| &*b.raw);
205 llvm::LLVMRustBuildInvoke(self.llbuilder,
208 args.len() as c_uint,
216 fn unreachable(&self) {
217 self.count_insn("unreachable");
219 llvm::LLVMBuildUnreachable(self.llbuilder);
// Arithmetic / bitwise / unary wrappers. Pattern throughout: count the
// instruction, then forward to the matching LLVMBuild* with `noname()`.
// The `*_fast` float variants additionally set the unsafe-algebra
// (fast-math) flag on the emitted instruction via
// LLVMRustSetHasUnsafeAlgebra. Note each fast variant reuses the plain
// op's counter category (e.g. fadd_fast counts as "fadd").
224 fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
225 self.count_insn("add");
227 llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
231 fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
232 self.count_insn("fadd");
234 llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
238 fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
239 self.count_insn("fadd");
241 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
242 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
247 fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
248 self.count_insn("sub");
250 llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
254 fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
255 self.count_insn("fsub");
257 llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
261 fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
262 self.count_insn("fsub");
264 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
265 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
270 fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
271 self.count_insn("mul");
273 llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
277 fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
278 self.count_insn("fmul");
280 llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
284 fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
285 self.count_insn("fmul");
287 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
288 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
294 fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
295 self.count_insn("udiv");
297 llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
// exact division: UB at the LLVM level if the division has a remainder.
301 fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
302 self.count_insn("exactudiv");
304 llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
308 fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
309 self.count_insn("sdiv");
311 llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
315 fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
316 self.count_insn("exactsdiv");
318 llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
322 fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
323 self.count_insn("fdiv");
325 llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
329 fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
330 self.count_insn("fdiv");
332 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
333 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
338 fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
339 self.count_insn("urem");
341 llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
345 fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
346 self.count_insn("srem");
348 llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
352 fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
353 self.count_insn("frem");
355 llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
359 fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
360 self.count_insn("frem");
362 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
363 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
368 fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
369 self.count_insn("shl");
371 llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
375 fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
376 self.count_insn("lshr");
378 llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
382 fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
383 self.count_insn("ashr");
385 llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
389 fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
390 self.count_insn("and");
392 llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
396 fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
397 self.count_insn("or");
399 llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
403 fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
404 self.count_insn("xor");
406 llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
410 fn neg(&self, v: &'ll Value) -> &'ll Value {
411 self.count_insn("neg");
413 llvm::LLVMBuildNeg(self.llbuilder, v, noname())
417 fn fneg(&self, v: &'ll Value) -> &'ll Value {
418 self.count_insn("fneg");
420 llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
424 fn not(&self, v: &'ll Value) -> &'ll Value {
425 self.count_insn("not");
427 llvm::LLVMBuildNot(self.llbuilder, v, noname())
// alloca: emit a stack allocation in the function's *entry* block (a fresh
// builder is positioned at the first basic block) so LLVM's mem2reg can
// promote it; `dynamic_alloca` emits at the current insertion point.
431 fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
432 let bx = Builder::with_cx(self.cx);
433 bx.position_at_start(unsafe {
434 llvm::LLVMGetFirstBasicBlock(self.llfn())
436 bx.dynamic_alloca(ty, name, align)
439 fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
440 self.count_insn("alloca");
// Empty name avoids the SmallCStr allocation by using the shared noname().
442 let alloca = if name.is_empty() {
443 llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
445 let name = SmallCStr::new(name);
446 llvm::LLVMBuildAlloca(self.llbuilder, ty,
449 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// array_alloca: stack allocation of `len` elements of `ty`.
454 fn array_alloca(&self,
458 align: Align) -> &'ll Value {
459 self.count_insn("alloca");
461 let alloca = if name.is_empty() {
462 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
464 let name = SmallCStr::new(name);
465 llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
468 llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
// Loads: plain (with explicit alignment), volatile, and atomic.
473 fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value {
474 self.count_insn("load");
476 let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
477 llvm::LLVMSetAlignment(load, align.abi() as c_uint);
482 fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value {
483 self.count_insn("load.volatile");
485 let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
486 llvm::LLVMSetVolatile(insn, llvm::True);
491 fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, size: Size) -> &'ll Value {
492 self.count_insn("load.atomic");
494 let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order);
495 // LLVM requires the alignment of atomic loads to be at least the size of the type.
496 llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
// range_metadata: attach !range [start, end) to a load, except on amdgpu
// where it trips an LLVM assertion (see comment below).
502 fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
503 if self.sess().target.target.arch == "amdgpu" {
504 // amdgpu/LLVM does something weird and thinks a i64 value is
505 // split into a v2i32, halving the bitwidth LLVM expects,
506 // tripping an assertion. So, for now, just disable this
512 let llty = val_ty(load);
514 C_uint_big(llty, range.start),
515 C_uint_big(llty, range.end)
518 llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
519 llvm::LLVMMDNodeInContext(self.cx.llcx,
// nonnull_metadata: mark a loaded pointer as known non-null (empty MD node).
525 fn nonnull_metadata(&self, load: &'ll Value) {
527 llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
528 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
// store: delegates to store_with_flags with no flags.
532 fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
533 self.store_with_flags(val, ptr, align, MemFlags::empty())
// store_with_flags (signature lines missing from this extract): honors
// UNALIGNED (alignment forced — presumably to 1, the branch's then-arm is
// not visible here), VOLATILE, and NONTEMPORAL via !nontemporal metadata.
543 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
544 self.count_insn("store");
// check_store may bitcast `ptr` to match the stored value's type.
545 let ptr = self.check_store(val, ptr);
547 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
548 let align = if flags.contains(MemFlags::UNALIGNED) {
551 align.abi() as c_uint
553 llvm::LLVMSetAlignment(store, align);
554 if flags.contains(MemFlags::VOLATILE) {
555 llvm::LLVMSetVolatile(store, llvm::True);
557 if flags.contains(MemFlags::NONTEMPORAL) {
558 // According to LLVM [1] building a nontemporal store must
559 // *always* point to a metadata value of the integer 1.
561 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
562 let one = C_i32(self.cx, 1);
563 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
564 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
570 fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
571 order: AtomicOrdering, size: Size) {
572 debug!("Store {:?} -> {:?}", val, ptr);
573 self.count_insn("store.atomic");
574 let ptr = self.check_store(val, ptr);
576 let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order);
577 // LLVM requires the alignment of atomic stores to be at least the size of the type.
578 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
// GEPs: plain and inbounds address computation.
582 fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
583 self.count_insn("gep");
585 llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
586 indices.len() as c_uint, noname())
590 fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
591 self.count_insn("inboundsgep");
593 llvm::LLVMBuildInBoundsGEP(
594 self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
// Cast instructions — all one-line wrappers over the corresponding
// LLVMBuild* cast.
599 fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
600 self.count_insn("trunc");
602 llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
606 fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
607 self.count_insn("sext");
609 llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
613 fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
614 self.count_insn("fptoui");
616 llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
620 fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
621 self.count_insn("fptosi");
623 llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname())
627 fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
628 self.count_insn("uitofp");
630 llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
634 fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
635 self.count_insn("sitofp");
637 llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
641 fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
642 self.count_insn("fptrunc");
644 llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
648 fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
649 self.count_insn("fpext");
651 llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
655 fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
656 self.count_insn("ptrtoint");
658 llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
662 fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
663 self.count_insn("inttoptr");
665 llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
669 fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
670 self.count_insn("bitcast");
672 llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
// intcast: sign-aware integer resize (trunc or s/zext picked by LLVM).
677 fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
678 self.count_insn("intcast");
680 llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
684 fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
685 self.count_insn("pointercast");
687 llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
// Comparisons: the backend-agnostic predicate is converted to the LLVM
// predicate for icmp; fcmp passes its predicate through as a c_uint.
692 fn icmp(&self, op: traits::IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
693 self.count_insn("icmp");
694 let op : llvm::IntPredicate = traits::IntPredicateMethods::convert_to_backend_specific(op);
696 llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
700 fn fcmp(&self, op: traits::RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
701 self.count_insn("fcmp");
703 llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
707 /* Miscellaneous instructions */
// empty_phi: a phi node with no incoming edges yet (filled in by `phi` /
// `add_incoming_to_phi`).
708 fn empty_phi(&self, ty: &'ll Type) -> &'ll Value {
709 self.count_insn("emptyphi");
711 llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
// phi: build a phi node with parallel value/block incoming lists.
715 fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
716 assert_eq!(vals.len(), bbs.len());
717 let phi = self.empty_phi(ty);
718 self.count_insn("addincoming");
720 llvm::LLVMAddIncoming(phi, vals.as_ptr(),
722 vals.len() as c_uint);
// inline_asm_call: verify the constraint string with LLVM first; returns
// None when verification fails (the call below only happens on success —
// the failing branch's lines are not visible in this extract).
727 fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
728 inputs: &[&'ll Value], output: &'ll Type,
729 volatile: bool, alignstack: bool,
730 dia: AsmDialect) -> Option<&'ll Value> {
731 self.count_insn("inlineasm");
733 let volatile = if volatile { llvm::True }
734 else { llvm::False };
735 let alignstack = if alignstack { llvm::True }
736 else { llvm::False };
738 let argtys = inputs.iter().map(|v| {
739 debug!("Asm Input Type: {:?}", *v);
741 }).collect::<Vec<_>>();
743 debug!("Asm Output Type: {:?}", output);
744 let fty = Type::func::<Value>(&argtys[..], output);
746 // Ask LLVM to verify that the constraints are well-formed.
747 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
748 debug!("Constraint verification result: {:?}", constraints_ok);
750 let v = llvm::LLVMRustInlineAsm(
751 fty, asm, cons, volatile, alignstack, dia);
752 Some(self.call(v, inputs, None))
754 // LLVM has detected an issue with our constraints, bail out
// memcpy/memmove: alignments are passed per-pointer (u64 narrowed to c_uint).
760 fn memcpy(&self, dst: &'ll Value, dst_align: u64,
761 src: &'ll Value, src_align: u64,
762 size: &'ll Value, is_volatile: bool) -> &'ll Value {
764 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint,
765 src, src_align as c_uint, size, is_volatile)
769 fn memmove(&self, dst: &'ll Value, dst_align: u64,
770 src: &'ll Value, src_align: u64,
771 size: &'ll Value, is_volatile: bool) -> &'ll Value {
773 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint,
774 src, src_align as c_uint, size, is_volatile)
// minnum/maxnum: the Rust shims return Option — None on LLVM < 6.0, hence
// the expect messages.
778 fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
779 self.count_insn("minnum");
781 let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
782 instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
785 fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
786 self.count_insn("maxnum");
788 let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
789 instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
// select (signature partially missing from this extract).
794 &self, cond: &'ll Value,
795 then_val: &'ll Value,
796 else_val: &'ll Value,
798 self.count_insn("select");
800 llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
805 fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
806 self.count_insn("vaarg");
808 llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
// Vector element / shuffle operations.
812 fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
813 self.count_insn("extractelement");
815 llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
820 &self, vec: &'ll Value,
824 self.count_insn("insertelement");
826 llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
830 fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
831 self.count_insn("shufflevector");
833 llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
// vector_splat: broadcast `elt` to all lanes via insertelement into undef
// followed by a zero-mask shuffle (the standard LLVM splat idiom).
837 fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
839 let elt_ty = val_ty(elt);
840 let undef = llvm::LLVMGetUndef(Type::vector::<Value>(elt_ty, num_elts as u64));
841 let vec = self.insert_element(undef, elt, C_i32(self.cx, 0));
842 let vec_i32_ty = Type::vector::<Value>(Type::i32(self.cx), num_elts as u64);
843 self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
// Horizontal vector reductions. The float add/mul reductions only exist in a
// fast-math flavor (see the FIXME below); min/max have both NaN-respecting
// (NoNaNs=false) and fast (NoNaNs=true) variants.
847 fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
848 self.count_insn("vector.reduce.fadd_fast");
850 // FIXME: add a non-fast math version once
851 // https://bugs.llvm.org/show_bug.cgi?id=36732
853 let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
854 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
858 fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
859 self.count_insn("vector.reduce.fmul_fast");
861 // FIXME: add a non-fast math version once
862 // https://bugs.llvm.org/show_bug.cgi?id=36732
864 let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
865 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
869 fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
870 self.count_insn("vector.reduce.add");
871 unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
873 fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
874 self.count_insn("vector.reduce.mul");
875 unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
877 fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
878 self.count_insn("vector.reduce.and");
879 unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
881 fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
882 self.count_insn("vector.reduce.or");
883 unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
885 fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
886 self.count_insn("vector.reduce.xor");
887 unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
889 fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
890 self.count_insn("vector.reduce.fmin");
891 unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
893 fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
894 self.count_insn("vector.reduce.fmax");
895 unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
897 fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
898 self.count_insn("vector.reduce.fmin_fast");
900 let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
901 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
905 fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
906 self.count_insn("vector.reduce.fmax_fast");
908 let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
909 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
913 fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
914 self.count_insn("vector.reduce.min");
915 unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
917 fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
918 self.count_insn("vector.reduce.max");
919 unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
// Aggregate value access. The assert_eq! guards the u64 -> c_uint narrowing:
// it fails loudly if the index doesn't round-trip.
922 fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
923 self.count_insn("extractvalue");
924 assert_eq!(idx as c_uint as u64, idx);
926 llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
930 fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value,
931 idx: u64) -> &'ll Value {
932 self.count_insn("insertvalue");
933 assert_eq!(idx as c_uint as u64, idx);
935 llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
// Exception-handling constructs. The LLVMRust* pad/ret builders return
// Option; None means the linked LLVM lacks support, hence the expect calls.
940 fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value,
941 num_clauses: usize) -> &'ll Value {
942 self.count_insn("landingpad");
944 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
945 num_clauses as c_uint, noname())
949 fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) {
951 llvm::LLVMAddClause(landing_pad, clause);
955 fn set_cleanup(&self, landing_pad: &'ll Value) {
956 self.count_insn("setcleanup");
958 llvm::LLVMSetCleanup(landing_pad, llvm::True);
962 fn resume(&self, exn: &'ll Value) -> &'ll Value {
963 self.count_insn("resume");
965 llvm::LLVMBuildResume(self.llbuilder, exn)
969 fn cleanup_pad(&self,
970 parent: Option<&'ll Value>,
971 args: &[&'ll Value]) -> &'ll Value {
972 self.count_insn("cleanuppad");
973 let name = const_cstr!("cleanuppad");
975 llvm::LLVMRustBuildCleanupPad(self.llbuilder,
977 args.len() as c_uint,
981 ret.expect("LLVM does not have support for cleanuppad")
// cleanup_ret (signature partially missing from this extract).
985 &self, cleanup: &'ll Value,
986 unwind: Option<&'ll BasicBlock>,
988 self.count_insn("cleanupret");
990 llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind)
992 ret.expect("LLVM does not have support for cleanupret")
// catch_pad (signature partially missing from this extract).
997 args: &[&'ll Value]) -> &'ll Value {
998 self.count_insn("catchpad");
999 let name = const_cstr!("catchpad");
1001 llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
1002 args.len() as c_uint, args.as_ptr(),
1005 ret.expect("LLVM does not have support for catchpad")
1008 fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value {
1009 self.count_insn("catchret");
1011 llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind)
1013 ret.expect("LLVM does not have support for catchret")
// catch_switch: handlers are attached afterwards via add_handler.
1018 parent: Option<&'ll Value>,
1019 unwind: Option<&'ll BasicBlock>,
1020 num_handlers: usize,
1022 self.count_insn("catchswitch");
1023 let name = const_cstr!("catchswitch");
1025 llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
1026 num_handlers as c_uint,
1029 ret.expect("LLVM does not have support for catchswitch")
1032 fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
1034 llvm::LLVMRustAddHandler(catch_switch, handler);
1038 fn set_personality_fn(&self, personality: &'ll Value) {
1040 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1044 // Atomic Operations
// atomic_cmpxchg (signature partially missing): weak flag is converted to
// an LLVM Bool before the FFI call.
1050 order: AtomicOrdering,
1051 failure_order: AtomicOrdering,
1054 let weak = if weak { llvm::True } else { llvm::False };
1056 llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
1057 order, failure_order, weak)
// atomic_rmw (signature partially missing): final `False` = not singlethread.
1065 order: AtomicOrdering,
1068 llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False)
1072 fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
1074 llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope);
// add_case: attach a (value -> dest) case to a switch built earlier.
1078 fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
1080 llvm::LLVMAddCase(s, on_val, dest)
// add_incoming_to_phi: append a single incoming edge to an existing phi.
1084 fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
1085 self.count_insn("addincoming");
1087 llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
// set_invariant_load: tag a load with !invariant.load (empty MD node).
1091 fn set_invariant_load(&self, load: &'ll Value) {
1093 llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
1094 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
1098 /// Returns the ptr value that should be used for storing `val`.
// If the destination pointer's pointee type doesn't match `val`'s type,
// bitcast the *pointer* (not the value) so the store type-checks.
1099 fn check_store<'b>(&self,
1101 ptr: &'ll Value) -> &'ll Value {
1102 let dest_ptr_ty = val_ty(ptr);
1103 let stored_ty = val_ty(val);
1104 let stored_ptr_ty = stored_ty.ptr_to();
1106 assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer);
1108 if dest_ptr_ty == stored_ptr_ty {
1111 debug!("Type mismatch in store. \
1112 Expected {:?}, got {:?}; inserting bitcast",
1113 dest_ptr_ty, stored_ptr_ty);
1114 self.bitcast(ptr, stored_ptr_ty)
1118 /// Returns the args that should be used for a call to `llfn`.
// Compares each argument's type against the callee's declared parameter
// types; returns the args unchanged (borrowed) when everything matches,
// otherwise builds a new vector with bitcasts injected for mismatches.
1119 fn check_call<'b>(&self,
1122 args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
1123 let mut fn_ty = val_ty(llfn);
1124 // Strip off pointers
1125 while fn_ty.kind() == llvm::TypeKind::Pointer {
1126 fn_ty = fn_ty.element_type();
1129 assert!(fn_ty.kind() == llvm::TypeKind::Function,
1130 "builder::{} not passed a function, but {:?}", typ, fn_ty);
1132 let param_tys = fn_ty.func_params();
1134 let all_args_match = param_tys.iter()
1135 .zip(args.iter().map(|&v| val_ty(v)))
1136 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
// Fast path: no casting needed, hand back the borrowed slice.
1139 return Cow::Borrowed(args);
1142 let casted_args: Vec<_> = param_tys.into_iter()
1145 .map(|(i, (expected_ty, &actual_val))| {
1146 let actual_ty = val_ty(actual_val);
1147 if expected_ty != actual_ty {
1148 debug!("Type mismatch in function call of {:?}. \
1149 Expected {:?} for param {}, got {:?}; injecting bitcast",
1150 llfn, expected_ty, i, actual_ty);
1151 self.bitcast(actual_val, expected_ty)
1158 Cow::Owned(casted_args)
// Lifetime markers — thin wrappers over call_lifetime_intrinsic with the
// appropriate intrinsic name.
1161 fn lifetime_start(&self, ptr: &'ll Value, size: Size) {
1162 self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
1165 fn lifetime_end(&self, ptr: &'ll Value, size: Size) {
1166 self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
1169 /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
1170 /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
1171 /// and the intrinsic for `lt` and passes them to `emit`, which is in
1172 /// charge of generating code to call the passed intrinsic on whatever
1173 /// block of generated code is targeted for the intrinsic.
1175 /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
1176 /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
1177 fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) {
// No-op at OptLevel::No — lifetime markers only help the optimizer.
1178 if self.cx.sess().opts.optimize == config::OptLevel::No {
1182 let size = size.bytes();
1187 let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
// Intrinsic expects an i8* and a u64 size.
1189 let ptr = self.pointercast(ptr, Type::i8p(self.cx));
1190 self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None);
// call: type-check/bitcast args through check_call (same as invoke), then
// emit via LLVMRustBuildCall with the optional operand bundle.
1193 fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
1194 bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value {
1195 self.count_insn("call");
1197 debug!("Call {:?} with args ({:?})",
1201 let args = self.check_call("call", llfn, args);
1202 let bundle = bundle.map(|b| &*b.raw);
1205 llvm::LLVMRustBuildCall(
1208 args.as_ptr() as *const &llvm::Value,
1209 args.len() as c_uint,
1215 fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
1216 self.count_insn("zext");
1218 llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
// struct_gep: field access; the assert guards the u64 -> c_uint narrowing.
1222 fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
1223 self.count_insn("structgep");
1224 assert_eq!(idx as c_uint as u64, idx);
1226 llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
1230 fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> {