use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::sym;
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter::TrustedLen;
use std::ops::{Deref, Range};
use std::ptr;
use tracing::debug;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        ty::ParamEnv::reveal_all()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = TyAndLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }
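
    // Each entry below expands, through the macro above, into a thin wrapper;
    // for instance `add(a, b) => LLVMBuildAdd` becomes roughly:
    //
    //     fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
    //         unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
    //     }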
    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }
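
    // The `*_fast` variants below additionally set LLVM's fast-math flags
    // (via `LLVMRustSetHasUnsafeAlgebra`), which license reassociation and
    // other value-changing floating-point optimizations.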
    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_ast::IntTy::*;
        use rustc_ast::UintTy::*;
        use rustc_middle::ty::{Int, Uint};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        // `res` is a `{ iN, i1 }` pair: the wrapped result plus an overflow flag.
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }
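
    // `bool` is `i1` as an SSA immediate but `i8` in memory; the two methods
    // below convert between those representations at loads and stores.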
    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &abi::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(self.to_immediate(llval, place.layout))
        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;
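
        // Lay out a simple counted loop:
        //
        //   header: %current = phi [%start, entry], [%next, body]
        //           br (%current != %end), body, next
        //   body:   store %cg_elem -> %current
        //           %next = inbounds_gep %current, 1
        //           br header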
        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }
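
    // `range_metadata` attaches `!range` metadata to a load. For a `bool`
    // load, for example, the annotation prints as `!range !{i8 0, i8 2}`
    // in textual IR (valid values 0 and 1; the end bound is exclusive).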
    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }
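
    // Both forms below emit `getelementptr` address arithmetic; the `inbounds`
    // variant additionally promises the result stays inside the original
    // allocation, allowing more aggressive optimization.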
    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }

        None
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.signed.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.signed.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.signed.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.signed.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }

        None
    }

    fn fptosui_may_trap(&self, val: &'ll Value, dest_ty: &'ll Type) -> bool {
        // Most of the time we'll be generating the `fptosi` or `fptoui`
        // instruction for floating-point-to-integer conversions. These
        // instructions by definition in LLVM do not trap. For the WebAssembly
        // target, however, we'll lower in some cases to intrinsic calls instead
        // which may trap. If we detect that this is a situation where we'll be
        // using the intrinsics then we report that the call may trap, which
        // callers might need to handle.
        if !self.wasm_and_missing_nontrapping_fptoint() {
            return false;
        }
        let src_ty = self.cx.val_ty(val);
        let float_width = self.cx.float_width(src_ty);
        let int_width = self.cx.int_width(dest_ty);
        match (int_width, float_width) {
            (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
            _ => false,
        }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // When we can, use the native wasm intrinsics which have tighter
        // codegen. Note that this has a semantic difference in that the
        // intrinsic can trap whereas `fptoui` never traps. That difference,
        // however, is handled by `fptosui_may_trap` above.
        //
        // Note that we skip the wasm intrinsics for vector types where `fptoui`
        // must be used instead.
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
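            // Standard LLVM splat idiom: `vec` holds `elt` in lane 0, and
            // shuffling with an all-zeros mask makes every result lane copy
            // lane 0.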
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }
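
    // The lifetime intrinsics below lower to calls like
    // `call void @llvm.lifetime.start.p0i8(i64 <size>, i8* <ptr>)`, which let
    // LLVM's stack coloring reuse a stack slot outside the marked live range.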
    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }

    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                None,
            );
        }
    }

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }

    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }

    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }

    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }

    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }

    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }

    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }

    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }

    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys
            .iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }

    fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
        self.sess().target.target.arch == "wasm32"
            && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
    }
}