use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::{c_char, c_uint};
use log::debug;
use rustc::hir::def_id::DefId;
use rustc::session::config;
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter::TrustedLen;
use std::ops::{Deref, Range};
use std::ptr;
// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}
// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
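
// For example, building an add with `UNNAMED` produces textual IR like
// `%0 = add i32 %a, %b` (a numbered temporary) rather than a named result
// such as `%sum = add i32 %a, %b`.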
impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}
impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyLayout = TyLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}
macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}
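
// As a sketch, an entry such as `add(a, b) => LLVMBuildAdd` expands to
// roughly:
//
//     fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
//         unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
//     }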
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }
    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }
    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
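
    // The `_fast` variants correspond to instructions carrying fast-math
    // flags, e.g. `%r = fadd fast float %lhs, %rhs` in textual IR;
    // `LLVMRustSetHasUnsafeAlgebra` is what sets those flags on the freshly
    // built instruction.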
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc::ty::{Int, Uint};
        use syntax::ast::IntTy::*;
        use syntax::ast::UintTy::*;

        let new_kind = match ty.kind {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
            ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }
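
    // For illustration, a checked `u32` addition built through this path
    // should lower to IR along these lines (the width follows the operand
    // type):
    //
    //     %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
    //     %val = extractvalue { i32, i1 } %res, 0
    //     %overflow = extractvalue { i32, i1 } %res, 1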
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &layout::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(to_immediate(self, llval, place.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &layout::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }
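
    // The blocks built above form a simple counted loop, roughly:
    //
    //     (current block) -> repeat_loop_header    ; unconditional br
    //     repeat_loop_header -> repeat_loop_body   ; while current != end
    //     repeat_loop_body -> repeat_loop_header   ; store element, bump pointer
    //     repeat_loop_header -> repeat_loop_next   ; once current == end
    //
    // with `current` as a phi over (`start`, entry block) and (`next`, body).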
    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }
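
    // A load tagged this way shows up in textual IR along the lines of
    // `%v = load i8, i8* %p, !range !0` with `!0 = !{i8 0, i8 2}`, telling
    // LLVM the loaded value lies in `[0, 2)` (e.g. a `bool`).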
    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }
    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }
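
    // The nontemporal branch above corresponds to IR such as
    // `store i32 %val, i32* %ptr, !nontemporal !0` with `!0 = !{i32 1}`,
    // matching the LangRef requirement cited in the comment.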
    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }
    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }
    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }
    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }
    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }
    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let ptr_width = &self.sess().target.target.target_pointer_width;
        let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
        let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
        let ptr = self.pointercast(ptr, self.type_i8p());
        let align = self.const_u32(align.bytes() as u32);
        let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
        self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
    }
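
    // On a 64-bit target `intrinsic_key` works out to "llvm.memset.p0i8.i64",
    // i.e. a memset over `i8*` pointers whose length operand is an `i64`.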
    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }
    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }
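
    // This is the usual LLVM splat idiom: insert `elt` into lane 0 of an
    // undef vector, then shuffle with an all-zeros mask so every lane copies
    // lane 0, e.g.:
    //
    //     %v = insertelement <4 x float> undef, float %elt, i32 0
    //     %splat = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer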
    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }
    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }
    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }
    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
    }
    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }
    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}
impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }
    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }
    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                 Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }
    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys
            .iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                         Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }
    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        if self.cx.sess().opts.optimize == config::OptLevel::No {
            return;
        }

        let size = size.bytes();
        if size == 0 {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }
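
    // The calls built here look roughly like
    // `call void @llvm.lifetime.start(i64 %size, i8* %ptr)`, which lets
    // LLVM's stack coloring reuse the alloca's slot outside the marked
    // live range.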
    fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }
}