use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope};
use crate::llvm::{self, False, BasicBlock};
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
use rustc_codegen_ssa::MemFlags;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::hir::def_id::DefId;
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::ops::{Deref, Range};
use std::ptr;
use std::iter::TrustedLen;
use rustc_data_structures::const_cstr;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(b"\0")
};

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}

impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyLayout = TyLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

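// For reference, each entry passed to the macro above expands to a thin
// wrapper of the form:
//
//     fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
//         unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
//     }
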
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block(
        cx: &'a CodegenCx<'ll, 'tcx>,
        llfn: &'ll Value,
        name: &str,
    ) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(
                cx.llcx,
                llfn,
                name.as_ptr(),
            )
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe {
            llvm::LLVMCreateBuilderInContext(cx.llcx)
        };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe {
            llvm::LLVMGetInsertBlock(self.llbuilder)
        }
    }

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
    ) {
        let switch = unsafe {
            llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint)
        };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe {
                llvm::LLVMAddCase(switch, on_val, dest)
            }
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(self.llbuilder,
                                      llfn,
                                      args.as_ptr(),
                                      args.len() as c_uint,
                                      then,
                                      catch,
                                      bundle,
                                      UNNAMED)
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

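    // Simple value instructions, generated by the macro above. The
    // `unchecked_*` variants lower to the NSW/NUW ("no signed/unsigned
    // wrap") forms of the corresponding LLVM instructions, which make
    // integer overflow undefined behavior rather than wrapping.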
    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

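    // The `*_fast` variants below additionally set the fast-math flag on the
    // instruction (via `LLVMRustSetHasUnsafeAlgebra`), licensing LLVM to
    // reassociate and otherwise aggressively rewrite the float operations.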
    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

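    // Checked arithmetic lowers to the `llvm.{s,u}{add,sub,mul}.with.overflow.*`
    // intrinsics, each of which returns a `{ result, i1 overflowed }` pair.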
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use syntax::ast::IntTy::*;
        use syntax::ast::UintTy::*;
        use rustc::ty::{Int, Uint};

        let new_kind = match ty.kind {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
            ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (
            self.extract_value(res, 0),
            self.extract_value(res, 1),
        )
    }

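    // Static allocas are emitted into the function's entry block, where
    // LLVM's mem2reg/SROA passes expect to find them; allocas built anywhere
    // else are effectively dynamic and won't be promoted to registers.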
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe {
            llvm::LLVMGetFirstBasicBlock(self.llfn())
        });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self,
                    ty: &'ll Type,
                    len: &'ll Value,
                    align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

    fn load_operand(
        &mut self,
        place: PlaceRef<'tcx, &'ll Value>,
    ) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &layout::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(to_immediate(self, llval, place.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &layout::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                if scalar.is_bool() {
                    self.trunc(load, self.type_i1())
                } else {
                    load
                }
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

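    // Emits a small `header`/`body`/`next` loop that stores `cg_elem` into
    // each of `count` consecutive slots of `dest`; roughly, in IR terms:
    //
    //     repeat_loop_header:
    //         %current = phi [%start, %entry], [%next, %repeat_loop_body]
    //         %keep_going = icmp ne %current, %end
    //         br i1 %keep_going, %repeat_loop_body, %repeat_loop_next
    //     repeat_loop_body:
    //         store <cg_elem>, %current
    //         %next = getelementptr inbounds %current, 1
    //         br %repeat_loop_header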
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem.val.store(&mut body_bx,
                          PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }

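    // Attaches `!range` metadata to a load: a half-open [start, end)
    // interval (which is allowed to wrap) that the loaded value is known to
    // lie in, letting LLVM optimize on the layout invariant.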
    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx,
                                                            v.as_ptr(),
                                                            v.len() as c_uint));
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

603 debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
604 let ptr = self.check_store(val, ptr);
606 let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
607 let align = if flags.contains(MemFlags::UNALIGNED) {
610 align.bytes() as c_uint
612 llvm::LLVMSetAlignment(store, align);
613 if flags.contains(MemFlags::VOLATILE) {
614 llvm::LLVMSetVolatile(store, llvm::True);
616 if flags.contains(MemFlags::NONTEMPORAL) {
617 // According to LLVM [1] building a nontemporal store must
618 // *always* point to a metadata value of the integer 1.
620 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
621 let one = self.cx.const_i32(1);
622 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
623 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
    fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value,
                    order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
                               indices.len() as c_uint, UNNAMED)
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, UNNAMED)
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED)
        }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe {
            llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED)
        }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::RealPredicate::from_generic(op);
        unsafe {
            llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED)
        }
    }

    /* Miscellaneous instructions */
    fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
              src: &'ll Value, src_align: Align,
              size: &'ll Value, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                      src, src_align.bytes() as c_uint, size, is_volatile);
        }
    }

    fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
               src: &'ll Value, src_align: Align,
               size: &'ll Value, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                       src, src_align.bytes() as c_uint, size, is_volatile);
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let ptr_width = &self.sess().target.target.target_pointer_width;
        let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
        let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
        let ptr = self.pointercast(ptr, self.type_i8p());
        let align = self.const_u32(align.bytes() as u32);
        let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
        self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
    }

    fn select(
        &mut self, cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED)
        }
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED)
        }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED)
        }
    }

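    // Splats a scalar into a vector using the canonical LLVM idiom: insert
    // the element into lane 0 of an undef vector, then `shufflevector` with
    // an all-zeros mask to broadcast that lane everywhere.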
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED)
        }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value,
                    idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe {
            llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
                                       UNNAMED)
        }
    }

    fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value,
                   num_clauses: usize) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
                                      num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildResume(self.llbuilder, exn)
        }
    }

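    // Funclet-based exception handling (cleanuppad/catchpad/catchswitch),
    // used for MSVC-style unwinding. The `LLVMRust*` wrappers return an
    // `Option` that is `None` when the underlying LLVM lacks funclet
    // support, hence the `expect`s below.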
    fn cleanup_pad(&mut self,
                   parent: Option<&'ll Value>,
                   args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(self.llbuilder,
                                          parent,
                                          args.len() as c_uint,
                                          args.as_ptr(),
                                          name.as_ptr())
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self, funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
        };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self,
                 parent: &'ll Value,
                 args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
                                        args.len() as c_uint, args.as_ptr(),
                                        name.as_ptr())
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
                                           num_handlers as c_uint,
                                           name.as_ptr())
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
    }

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED)
        }
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe {
            llvm::LLVMGetBasicBlockParent(self.llbb())
        }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self, vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED)
        }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED)
        }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }

    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }

    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }

    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }

    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }

    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
    }

    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
    }

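    // The `_fast` reductions assume the input contains no NaNs (the `NoNaN`
    // flag) and mark the result with unsafe algebra, mirroring the scalar
    // `*_fast` operations above.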
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }

    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret = unsafe {
            llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
        };
        ret.expect("LLVM does not have support for catchret")
    }

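    // LLVM's notion of a value's type can disagree with the type rustc
    // expects at a store or call site, so these helpers compare the types
    // and defensively insert a bitcast when they differ rather than assert.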
    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!("type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                   dest_ptr_ty, stored_ptr_ty);
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    fn check_call<'b>(&mut self,
                      typ: &str,
                      llfn: &'ll Value,
                      args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
                "builder::{} not passed a function, but {:?}", typ, fn_ty);

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys.iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys.into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!("type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                           llfn, expected_ty, i, actual_ty);
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED)
        }
    }

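    // Emits `llvm.lifetime.start`/`llvm.lifetime.end` markers, which tell
    // LLVM that a stack slot is dead outside the delimited region so its
    // memory can be reused. Skipped at -O0 and for zero-sized slots, where
    // the markers would only add noise.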
    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        if self.cx.sess().opts.optimize == config::OptLevel::No {
            return;
        }

        let size = size.bytes();
        if size == 0 {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }

    fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe {
            llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED)
        };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(),
                                  bbs.as_ptr(),
                                  vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }
}