// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{self, False, BasicBlock};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
use rustc_codegen_ssa::{self, MemFlags};
use common::Funclet;
use context::CodegenCx;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::interfaces::*;
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
use rustc_codegen_ssa::mir::place::PlaceRef;
use std::borrow::Cow;
use std::ops::Range;
use std::ptr;

// All Builders must have an llfn associated with them
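// The `'a` lifetime is the borrow of the `CodegenCx`, `'ll` ties values and
// types to the LLVM context, and `'tcx` is the type context. `V` defaults to
// `&'ll Value` so this backend can use the builder directly while the
// generic codegen interfaces stay parameterized over the value type.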
pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll = &'ll Value> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx, V>,
}

impl<V> Drop for Builder<'a, 'll, 'tcx, V> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
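// The pointer ends up as the instruction-name argument of the `LLVMBuild*`
// calls below; a NUL byte (an empty name) lets LLVM number the resulting
// temporaries (%0, %1, ...) itself instead of copying a string per
// instruction.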
fn noname() -> *const c_char {
    static CNULL: c_char = 0;
    &CNULL
}

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Context = <CodegenCx<'ll, 'tcx> as BackendTypes>::Context;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}

impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyLayout = TyLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.cx.layout_of(ty)
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block<'b>(
        cx: &'a CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        name: &'b str
    ) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(
                cx.llcx,
                llfn,
                name.as_ptr()
            )
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe {
            llvm::LLVMCreateBuilderInContext(cx.llcx)
        };
        Builder {
            llbuilder,
            cx,
        }
    }

    fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llfn(&self) -> &'ll Value {
        unsafe {
            llvm::LLVMGetBasicBlockParent(self.llbb())
        }
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe {
            llvm::LLVMGetInsertBlock(self.llbuilder)
        }
    }

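    // Instruction accounting: both counters are gated on session flags
    // (presumably `-Z codegen-stats` and `-Z count-llvm-insns`), so in a
    // normal build neither branch below does any work.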
    fn count_insn(&self, category: &str) {
        if self.cx().sess().codegen_stats() {
            self.cx().stats.borrow_mut().n_llvm_insns += 1;
        }
        if self.cx().sess().count_llvm_insns() {
            *self.cx().stats
                      .borrow_mut()
                      .llvm_insns
                      .entry(category.to_string())
                      .or_insert(0) += 1;
        }
    }

    fn set_value_name(&mut self, value: &'ll Value, name: &str) {
        let cname = SmallCStr::new(name);
        unsafe {
            llvm::LLVMSetValueName(value, cname.as_ptr());
        }
    }

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        self.count_insn("retvoid");
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        self.count_insn("ret");
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        self.count_insn("br");
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        self.count_insn("condbr");
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        num_cases: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint)
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        self.count_insn("invoke");

        debug!("Invoke {:?} with args ({:?})",
               llfn,
               args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(self.llbuilder,
                                      llfn,
                                      args.as_ptr(),
                                      args.len() as c_uint,
                                      then,
                                      catch,
                                      bundle,
                                      noname())
        }
    }

    fn unreachable(&mut self) {
        self.count_insn("unreachable");
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    /* Arithmetic */
    fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("add");
        unsafe {
            llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fadd");
        unsafe {
            llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fadd");
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname());
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("sub");
        unsafe {
            llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fsub");
        unsafe {
            llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fsub");
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname());
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("mul");
        unsafe {
            llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fmul");
        unsafe {
            llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fmul");
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname());
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("udiv");
        unsafe {
            llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("exactudiv");
        unsafe {
            llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("sdiv");
        unsafe {
            llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("exactsdiv");
        unsafe {
            llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fdiv");
        unsafe {
            llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fdiv");
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname());
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("urem");
        unsafe {
            llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("srem");
        unsafe {
            llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("frem");
        unsafe {
            llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("frem");
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname());
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("shl");
        unsafe {
            llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("lshr");
        unsafe {
            llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("ashr");
        unsafe {
            llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("and");
        unsafe {
            llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("or");
        unsafe {
            llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("xor");
        unsafe {
            llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname())
        }
    }

    fn neg(&mut self, v: &'ll Value) -> &'ll Value {
        self.count_insn("neg");
        unsafe {
            llvm::LLVMBuildNeg(self.llbuilder, v, noname())
        }
    }

    fn fneg(&mut self, v: &'ll Value) -> &'ll Value {
        self.count_insn("fneg");
        unsafe {
            llvm::LLVMBuildFNeg(self.llbuilder, v, noname())
        }
    }

    fn not(&mut self, v: &'ll Value) -> &'ll Value {
        self.count_insn("not");
        unsafe {
            llvm::LLVMBuildNot(self.llbuilder, v, noname())
        }
    }

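    // Fixed-size allocas are emitted via a temporary builder positioned at
    // the start of the function's entry block, so that LLVM's mem2reg/SROA
    // passes can later promote them to SSA values.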
    fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe {
            llvm::LLVMGetFirstBasicBlock(self.llfn())
        });
        bx.dynamic_alloca(ty, name, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
        self.count_insn("alloca");
        unsafe {
            let alloca = if name.is_empty() {
                llvm::LLVMBuildAlloca(self.llbuilder, ty, noname())
            } else {
                let name = SmallCStr::new(name);
                llvm::LLVMBuildAlloca(self.llbuilder, ty,
                                      name.as_ptr())
            };
            llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self,
                    ty: &'ll Type,
                    len: &'ll Value,
                    name: &str,
                    align: Align) -> &'ll Value {
        self.count_insn("alloca");
        unsafe {
            let alloca = if name.is_empty() {
                llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname())
            } else {
                let name = SmallCStr::new(name);
                llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
                                           name.as_ptr())
            };
            llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.count_insn("load");
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
            llvm::LLVMSetAlignment(load, align.abi() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        self.count_insn("load.volatile");
        unsafe {
            let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
            llvm::LLVMSetVolatile(insn, llvm::True);
            insn
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        self.count_insn("load.atomic");
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                noname(),
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

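    // Turns a place (pointer plus layout) into an operand: unsized places
    // stay by-reference together with their extra data, immediates are
    // loaded (or folded directly from a constant global's initializer),
    // scalar pairs are loaded field by field, and everything else remains a
    // reference.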
    fn load_operand(
        &mut self,
        place: PlaceRef<'tcx, &'ll Value>
    ) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self.cx(), place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &layout::Scalar
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx());
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(to_immediate(self, llval, place.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let mut load = |i, scalar: &layout::Scalar| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, place.align);
                scalar_load_metadata(self, load, scalar);
                if scalar.is_bool() {
                    self.trunc(load, self.cx().type_i1())
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

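    // For example, a load of `bool` is an `i8` with valid range 0..2, so it
    // gets `!range` metadata equivalent to `!{i8 0, i8 2}`; loads of
    // known-non-null pointers get `!nonnull` metadata instead (see
    // `scalar_load_metadata` above).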
    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.cx().sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end)
            ];

            llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx,
                                                            v.as_ptr(),
                                                            v.len() as c_uint));
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

652 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
653 self.count_insn("store");
654 let ptr = self.check_store(val, ptr);
656 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
657 let align = if flags.contains(MemFlags::UNALIGNED) {
660 align.abi() as c_uint
662 llvm::LLVMSetAlignment(store, align);
663 if flags.contains(MemFlags::VOLATILE) {
664 llvm::LLVMSetVolatile(store, llvm::True);
666 if flags.contains(MemFlags::NONTEMPORAL) {
667 // According to LLVM [1] building a nontemporal store must
668 // *always* point to a metadata value of the integer 1.
670 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
671 let one = self.cx.const_i32(1);
672 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
673 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
679 fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value,
680 order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
681 debug!("Store {:?} -> {:?}", val, ptr);
682 self.count_insn("store.atomic");
683 let ptr = self.check_store(val, ptr);
685 let store = llvm::LLVMRustBuildAtomicStore(
689 AtomicOrdering::from_generic(order),
691 // LLVM requires the alignment of atomic stores to be at least the size of the type.
692 llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        self.count_insn("gep");
        unsafe {
            llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
                               indices.len() as c_uint, noname())
        }
    }

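    // `inbounds` promises LLVM that the pointer arithmetic stays within the
    // allocation that the base pointer points into; violating that yields a
    // poison/undefined result, and the promise is what enables most GEP
    // optimizations.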
    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        self.count_insn("inboundsgep");
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname())
        }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("trunc");
        unsafe {
            llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("sext");
        unsafe {
            llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("fptoui");
        unsafe {
            llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("fptosi");
        unsafe {
            llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("uitofp");
        unsafe {
            llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("sitofp");
        unsafe {
            llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("fptrunc");
        unsafe {
            llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("fpext");
        unsafe {
            llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("ptrtoint");
        unsafe {
            llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("inttoptr");
        unsafe {
            llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("bitcast");
        unsafe {
            llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        self.count_insn("intcast");
        unsafe {
            llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
        }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("pointercast");
        unsafe {
            llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
        }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("icmp");
        let op = llvm::IntPredicate::from_generic(op);
        unsafe {
            llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
        }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("fcmp");
        unsafe {
            llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
        }
    }

    /* Miscellaneous instructions */
    fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value {
        self.count_insn("emptyphi");
        unsafe {
            llvm::LLVMBuildPhi(self.llbuilder, ty, noname())
        }
    }

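    // A phi node needs exactly one incoming value per predecessor of its
    // block, which is why the value and block slices below must have equal
    // lengths.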
    fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = self.empty_phi(ty);
        self.count_insn("addincoming");
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(),
                                  bbs.as_ptr(),
                                  vals.len() as c_uint);
            phi
        }
    }

    fn inline_asm_call(&mut self, asm: *const c_char, cons: *const c_char,
                       inputs: &[&'ll Value], output: &'ll Type,
                       volatile: bool, alignstack: bool,
                       dia: syntax::ast::AsmDialect) -> Option<&'ll Value> {
        self.count_insn("inlineasm");

        let volatile = if volatile { llvm::True }
                       else { llvm::False };
        let alignstack = if alignstack { llvm::True }
                         else { llvm::False };

        let argtys = inputs.iter().map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            self.cx.val_ty(*v)
        }).collect::<Vec<_>>();

        debug!("Asm Output Type: {:?}", output);
        let fty = self.cx().type_func(&argtys[..], output);
        unsafe {
            // Ask LLVM to verify that the constraints are well-formed.
            let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
            debug!("Constraint verification result: {:?}", constraints_ok);

            if constraints_ok {
                let v = llvm::LLVMRustInlineAsm(
                    fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia));
                Some(self.call(v, inputs, None))
            } else {
                // LLVM has detected an issue with our constraints, bail out
                None
            }
        }
    }

    fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
              src: &'ll Value, src_align: Align,
              size: &'ll Value, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.cx().type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.cx().type_i8p());
        let src = self.pointercast(src, self.cx().type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
                                      src, src_align.abi() as c_uint, size, is_volatile);
        }
    }

    fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
               src: &'ll Value, src_align: Align,
               size: &'ll Value, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.cx().type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.cx().type_i8p());
        let src = self.pointercast(src, self.cx().type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
                                       src, src_align.abi() as c_uint, size, is_volatile);
        }
    }

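    // memset goes through the older intrinsic signature that still takes the
    // alignment and volatility as explicit arguments (newer LLVMs instead
    // express alignment via attributes); the intrinsic name is keyed on the
    // target pointer width, e.g. "llvm.memset.p0i8.i64" on 64-bit targets.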
    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let ptr_width = &self.cx().sess().target.target.target_pointer_width;
        let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
        let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
        let ptr = self.pointercast(ptr, self.cx().type_i8p());
        let align = self.cx().const_u32(align.abi() as u32);
        let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
        self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
    }

    fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("minnum");
        unsafe {
            let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs);
            instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0")
        }
    }

    fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        self.count_insn("maxnum");
        unsafe {
            let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs);
            instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0")
        }
    }

    fn select(
        &mut self, cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        self.count_insn("select");
        unsafe {
            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname())
        }
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        self.count_insn("vaarg");
        unsafe {
            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname())
        }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        self.count_insn("extractelement");
        unsafe {
            llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname())
        }
    }

    fn insert_element(
        &mut self, vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        self.count_insn("insertelement");
        unsafe {
            llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname())
        }
    }

    fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value {
        self.count_insn("shufflevector");
        unsafe {
            llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname())
        }
    }

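    // Splats use the classic insertelement + shufflevector idiom: the scalar
    // is inserted into lane 0 of an undef vector, which is then shuffled
    // with an all-zeros mask (`const_null` of an i32 vector), so every
    // result lane copies lane 0.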
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
        }
    }

    fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fadd_fast");
        unsafe {
            // FIXME: add a non-fast math version once
            // https://bugs.llvm.org/show_bug.cgi?id=36732
            // is fixed.
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fmul_fast");
        unsafe {
            // FIXME: add a non-fast math version once
            // https://bugs.llvm.org/show_bug.cgi?id=36732
            // is fixed.
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.add");
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.mul");
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.and");
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.or");
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.xor");
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fmin");
        unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
    }
    fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fmax");
        unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
    }
    fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fmin_fast");
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        self.count_insn("vector.reduce.fmax_fast");
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        self.count_insn("vector.reduce.min");
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        self.count_insn("vector.reduce.max");
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        self.count_insn("extractvalue");
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
        }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value,
                    idx: u64) -> &'ll Value {
        self.count_insn("insertvalue");
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
                                       noname())
        }
    }

    fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value,
                   num_clauses: usize) -> &'ll Value {
        self.count_insn("landingpad");
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
                                      num_clauses as c_uint, noname())
        }
    }

    fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        self.count_insn("setcleanup");
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        self.count_insn("resume");
        unsafe {
            llvm::LLVMBuildResume(self.llbuilder, exn)
        }
    }

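    // The *pad/*ret methods below implement funclet-based (MSVC-style)
    // exception handling; the `LLVMRust*` wrappers return `None` when the
    // LLVM in use has no funclet support, hence the `expect`s.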
    fn cleanup_pad(&mut self,
                   parent: Option<&'ll Value>,
                   args: &[&'ll Value]) -> Funclet<'ll> {
        self.count_insn("cleanuppad");
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(self.llbuilder,
                                          parent,
                                          args.len() as c_uint,
                                          args.as_ptr(),
                                          name.as_ptr())
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self, funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        self.count_insn("cleanupret");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
        };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self,
                 parent: &'ll Value,
                 args: &[&'ll Value]) -> Funclet<'ll> {
        self.count_insn("catchpad");
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
                                        args.len() as c_uint, args.as_ptr(),
                                        name.as_ptr())
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        self.count_insn("catchret");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
        };
        ret.expect("LLVM does not have support for catchret")
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        self.count_insn("catchswitch");
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
                                           num_handlers as c_uint,
                                           name.as_ptr())
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False)
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope)
            );
        }
    }

    fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddCase(s, on_val, dest)
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        self.count_insn("addincoming");
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
        }
    }

    fn check_store<'b>(&mut self,
                       val: &'ll Value,
                       ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!("Type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                   dest_ptr_ty, stored_ptr_ty);
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

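    // Compares each argument against the callee's declared parameter types
    // and bitcasts any value whose LLVM type differs, papering over cases
    // where rustc and LLVM disagree about the exact (usually pointee) type.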
    fn check_call<'b>(&mut self,
                      typ: &str,
                      llfn: &'ll Value,
                      args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
                "builder::{} not passed a function, but {:?}", typ, fn_ty);

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys.iter()
            .zip(args.iter().map(|&v| self.cx().val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys.into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.cx().val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!("Type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                           llfn, expected_ty, i, actual_ty);
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        if self.cx.sess().opts.optimize == config::OptLevel::No {
            return;
        }

        let size = size.bytes();
        if size == 0 {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        self.count_insn("call");

        debug!("Call {:?} with args ({:?})",
               llfn,
               args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle, noname()
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.count_insn("zext");
        unsafe {
            llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname())
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        self.count_insn("structgep");
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
        }
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMDeleteBasicBlock(bb);
        }
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}