use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use cstr::cstr;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{
    FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
use rustc_symbol_mangling::typeid::kcfi_typeid_for_fnabi;
use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter;
use std::ops::Deref;
use std::ptr;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'_, '_, '_> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

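// For example (illustrative, not from this file): an `add` built with UNNAMED
// prints as `%0 = add i32 %a, %b`, with LLVM assigning the sequential number,
// rather than `%sum = add i32 %a, %b`.
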
impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl<'tcx> ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        self.cx.target_spec()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
        let bx = Builder::with_cx(cx);
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
        }
        bx
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn set_span(&mut self, _span: Span) {}

    fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
        unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        }
    }

    fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
        Self::append_block(self.cx, self.llfn(), name)
    }

    fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
        *self = Self::build(self.cx, llbb)
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

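    // For orientation, the `switch` built above prints as textual IR of the
    // form (labels illustrative):
    //
    //   switch i32 %v, label %else_llbb [ i32 0, label %case0
    //                                     i32 1, label %case1 ]
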
    fn invoke(
        &mut self,
        llty: &'ll Type,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
        let mut bundles = vec![funclet_bundle];

        // Set KCFI operand bundle
        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
        let kcfi_bundle =
            if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
            } else {
                None
            };
        if kcfi_bundle.is_some() {
            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
            bundles.push(kcfi_bundle);
        }

        bundles.retain(|bundle| bundle.is_some());
        let invoke = unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                UNNAMED,
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, invoke);
        }
        invoke
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_middle::ty::{Int, Uint};
        use rustc_middle::ty::{IntTy::*, UintTy::*};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(_) => {
                    // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
                    // to be the canonical form. It will attempt to reform llvm.usub.with.overflow
                    // in the backend if profitable.
                    let sub = self.sub(lhs, rhs);
                    let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
                    return (sub, cmp);
                }

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let res = self.call_intrinsic(name, &[lhs, rhs]);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }

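    // Each `llvm.*.with.overflow.*` intrinsic returns a `{ iN, i1 }` pair,
    // e.g. `llvm.uadd.with.overflow.i32` yields `{ i32, i1 }`; the two
    // `extract_value` calls above split it into (result, overflow-flag).
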
    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

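    // Context for the two helpers above: `bool` is represented as `i1` when
    // held as an immediate but as `i8` when stored in memory, so scalar loads
    // and stores convert at this boundary via `zext`/`trunc`.
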
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

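    // Note: allocas are emitted at the start of the function's first block
    // (rather than at the current insertion point) since LLVM's mem2reg/SROA
    // passes generally only promote allocas that appear in the entry block.
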
    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca =
                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ty,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

    #[instrument(level = "trace", skip(self))]
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        #[instrument(level = "trace", skip(bx))]
        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: abi::Scalar,
            layout: TyAndLayout<'tcx>,
            offset: Size,
        ) {
            if !scalar.is_always_valid(bx) {
                bx.noundef_metadata(load);
            }

            match scalar.primitive() {
                abi::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, scalar.valid_range(bx));
                    }
                }
                abi::Pointer => {
                    if !scalar.valid_range(bx).contains(0) {
                        bx.nonnull_metadata(load);
                    }

                    if let Some(pointee) = layout.pointee_info_at(bx, offset) {
                        if let Some(_) = pointee.safe {
                            bx.align_metadata(load, pointee.align);
                        }
                    }
                }
                abi::F32 | abi::F64 => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            let llty = place.layout.llvm_type(self);
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        if let Some(init) = llvm::LLVMGetInitializer(global) {
                            if self.val_ty(init) == llty {
                                const_llval = Some(init);
                            }
                        }
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(llty, place.llval, place.align);
                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                }
                load
            });
            OperandValue::Immediate(self.to_immediate(llval, place.layout))
        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
            let b_offset = a.size(self).align_to(b.align(self).abi);
            let pair_ty = place.layout.llvm_type(self);

            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                let load = self.load(llty, llptr, align);
                scalar_load_metadata(self, load, scalar, layout, offset);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.layout, place.align, Size::ZERO),
                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

    fn write_operand_repeatedly(
        &mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(self, zero).llval;
        let end = dest.project_index(self, count).llval;

        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
        let next_bb = self.append_sibling_block("repeat_loop_next");

        self.br(header_bb);

        let mut header_bx = Self::build(self.cx, header_bb);
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bb, next_bb);

        let mut body_bx = Self::build(self.cx, body_bb);
        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(
            self.backend_type(cg_elem.layout),
            current,
            &[self.const_usize(1)],
        );
        body_bx.br(header_bb);
        header_bx.add_incoming_to_phi(current, next, body_bb);

        *self = Self::build(self.cx, next_bb);
    }

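    // Roughly, the loop built above looks like this in textual IR (block
    // names come from the `append_sibling_block` calls; types elided):
    //
    //   repeat_loop_header:
    //     %current = phi [ %start, %entry ], [ %next, %repeat_loop_body ]
    //     %keep_going = icmp ne %current, %end
    //     br i1 %keep_going, label %repeat_loop_body, label %repeat_loop_next
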
    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
        if self.sess().target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

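    // `!range` metadata describes a half-open [start, end) interval, which is
    // why `range.end.wrapping_add(1)` is emitted above: e.g. a boolean's
    // WrappingRange of 0..=1 becomes the metadata pair `!{i8 0, i8 2}`.
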
    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: https://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP2(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        indices: &[&'ll Value],
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP2(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
    }

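    // `struct_gep` computes a field address without loading; for a struct
    // type `{ i32, i64 }` and `idx` 1 it corresponds to textual IR roughly
    // like `getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1`.
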
    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(false, val, dest_ty)
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(true, val, dest_ty)
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
        // poor codegen. The reason for this is that the corresponding wasm
        // instructions, `i32.trunc_f32_s` for example, will trap when the float
        // is out-of-bounds, infinity, or NaN. This means that LLVM
        // automatically inserts control flow around `fptoui` and `fptosi`
        // because the LLVM instruction `fptoui` is defined as producing a
        // poison value, not having UB on out-of-bounds values.
        //
        // This method, however, is only used with non-saturating casts that
        // have UB on out-of-bounds values. This means that it's ok if we use
        // the raw wasm instruction since out-of-bounds values can do whatever
        // we like. To ensure that LLVM picks the right instruction we choose
        // the raw wasm intrinsic functions which avoid LLVM inserting all the
        // other control flow automatically.
        if self.sess().target.is_like_wasm {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    return self.call_intrinsic(name, &[val]);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // see `fptoui` above for why wasm is different here
        if self.sess().target.is_like_wasm {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    return self.call_intrinsic(name, &[val]);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::RealPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

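    // The above is the standard LLVM splat idiom: insert `elt` into lane 0 of
    // an undef vector, then `shufflevector` with an all-zeros mask
    // (`const_null` of the `<num_elts x i32>` type) to broadcast lane 0 into
    // every lane.
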
    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
        let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
    }

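    // The `{ i8*, i32 }` aggregate built here is the usual Itanium-style EH
    // payload: an exception pointer plus a type-selector integer, which the
    // two `extract_value` calls split apart.
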
    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
        let mut exn = self.const_undef(ty);
        exn = self.insert_value(exn, exn0, 0);
        exn = self.insert_value(exn, exn1, 1);
        unsafe {
            llvm::LLVMBuildResume(self.llbuilder, exn);
        }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
        unsafe {
            llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
                .expect("LLVM does not have support for cleanupret");
        }
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        handlers: &[&'ll BasicBlock],
    ) -> &'ll Value {
        let name = cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                handlers.len() as c_uint,
                name.as_ptr(),
            )
        };
        let ret = ret.expect("LLVM does not have support for catchswitch");
        for handler in handlers {
            unsafe {
                llvm::LLVMRustAddHandler(ret, handler);
            }
        }
        ret
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            let value = llvm::LLVMBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                llvm::False, // SingleThreaded
            );
            llvm::LLVMSetWeak(value, weak);
            value
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                llvm::False, // SingleThreaded
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: SynchronizationScope,
    ) {
        let single_threaded = match scope {
            SynchronizationScope::SingleThread => llvm::True,
            SynchronizationScope::CrossThread => llvm::False,
        };
        unsafe {
            llvm::LLVMBuildFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                single_threaded,
                UNNAMED,
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }

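    // Sketch of what a marker pair prints as, assuming a 16-byte slot:
    //   call void @llvm.lifetime.start.p0i8(i64 16, i8* %slot)
    //   call void @llvm.lifetime.end.p0i8(i64 16, i8* %slot)
    // The markers let LLVM's stack coloring reuse the slot outside the range.
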
    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let llty = self.cx.type_func(
            &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
            self.cx.type_void(),
        );
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llty, llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                [].as_ptr(),
                0 as c_uint,
            );
        }
    }

    fn call(
        &mut self,
        llty: &'ll Type,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
        let mut bundles = vec![funclet_bundle];

        // Set KCFI operand bundle
        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
        let kcfi_bundle =
            if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi);
                Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
            } else {
                None
            };
        if kcfi_bundle.is_some() {
            let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
            bundles.push(kcfi_bundle);
        }

        bundles.retain(|bundle| bundle.is_some());
        let call = unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundles.as_ptr(),
                bundles.len() as c_uint,
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, call);
        }
        call
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
    }
}

impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    fn align_metadata(&mut self, load: &'ll Value, align: Align) {
        unsafe {
            let v = [self.cx.const_u64(align.bytes())];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_align as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn noundef_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_noundef as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    fn check_call<'b>(
        &mut self,
        typ: &str,
        fn_ty: &'ll Type,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = iter::zip(param_tys, args)
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
        let (ty, f) = self.cx.get_intrinsic(intrinsic);
        self.call(ty, None, f, args, None)
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
    }

    pub(crate) fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        let src_ty = self.cx.val_ty(val);
        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
            (
                self.cx.element_type(src_ty),
                self.cx.element_type(dest_ty),
                Some(self.cx.vector_length(src_ty)),
            )
        } else {
            (src_ty, dest_ty, None)
        };
        let float_width = self.cx.float_width(float_ty);
        let int_width = self.cx.int_width(int_ty);

        let instr = if signed { "fptosi" } else { "fptoui" };
        let name = if let Some(vector_length) = vector_length {
            format!(
                "llvm.{}.sat.v{}i{}.v{}f{}",
                instr, vector_length, int_width, vector_length, float_width
            )
        } else {
            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
        };
        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
        self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
    }

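    // Examples of names produced by the format strings above:
    //   scalar: "llvm.fptosi.sat.i32.f32"
    //   vector: "llvm.fptoui.sat.v4i32.v4f32"
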
    pub(crate) fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts,
        // while LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12).
        // The personality lives on the parent function anyway.
        self.set_personality_fn(pers_fn);
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
        }
    }
}