use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::meth;
use crate::traits::*;
use crate::MemFlags;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};
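/// Helper for the memory-copy intrinsics: scales the size of `ty` by `count`
/// and emits a (possibly volatile) `memmove` or `memcpy` at the element
/// alignment, depending on whether the source and destination may overlap.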
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, allow_overlap: bool, volatile: bool, ty: Ty<'tcx>,
    dst: Bx::Value, src: Bx::Value, count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}
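/// Helper for the byte-set intrinsics (`write_bytes`/`volatile_set_memory` in
/// the match below): scales the size of `ty` by `count` and emits a (possibly
/// volatile) `memset` at the element alignment.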
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, volatile: bool, ty: Ty<'tcx>,
    dst: Bx::Value, val: Bx::Value, count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
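    /// Lowers a call to a compiler intrinsic: dispatches on the intrinsic's
    /// symbol name, emits the corresponding builder instructions, and, when
    /// the return value is not ignored, writes the result into `llresult`.
    /// Names not handled here fall through to the backend-specific
    /// `codegen_intrinsic_call` at the end of the match.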
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig =
            bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    // Alignment is always nonzero.
                    sym::vtable_align => bx.range_metadata(value, WrappingRange { start: 1, end: !0 }),
            | sym::variant_count => {
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::write_bytes => {
            sym::volatile_copy_nonoverlapping_memory => {
            sym::volatile_copy_memory => {
            sym::volatile_set_memory => {
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::exact_div => {
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                        sym::unchecked_div => {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                                bx.udiv(args[0].immediate(), args[1].immediate())
                        sym::unchecked_rem => {
                                bx.srem(args[0].immediate(), args[1].immediate())
                                bx.urem(args[0].immediate(), args[1].immediate())
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                                bx.lshr(args[0].immediate(), args[1].immediate())
                        sym::unchecked_add => {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                        sym::unchecked_sub => {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                        sym::unchecked_mul => {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                        span_invalid_monomorphization_error(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        span_invalid_monomorphization_error(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic float type, found `{}`",
            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                    bx.fptosi(args[0].immediate(), llret_ty)
                    bx.fptoui(args[0].immediate(), llret_ty)
            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            sym::const_deallocate => {
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
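            // For illustration (example names, not an exhaustive list): the
            // parsing below turns `atomic_xadd_relaxed` into operation `xadd`
            // with ordering `relaxed`, and `atomic_cxchg_acquire_relaxed` into
            // `cxchg` with success/failure orderings `acquire`/`relaxed`.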
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};
                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
431 "cxchg" | "cxchgweak" => {
432 let Some((success, failure)) = ordering.split_once('_') else {
433 bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
435 let ty = substs.type_at(0);
436 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
437 let weak = instruction == "cxchgweak";
438 let mut dst = args[0].immediate();
439 let mut cmp = args[1].immediate();
440 let mut src = args[2].immediate();
441 if ty.is_unsafe_ptr() {
442 // Some platforms do not support atomic operations on pointers,
443 // so we cast to integer first.
444 let ptr_llty = bx.type_ptr_to(bx.type_isize());
445 dst = bx.pointercast(dst, ptr_llty);
446 cmp = bx.ptrtoint(cmp, bx.type_isize());
447 src = bx.ptrtoint(src, bx.type_isize());
449 let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
450 let val = bx.extract_value(pair, 0);
451 let success = bx.extract_value(pair, 1);
452 let val = bx.from_immediate(val);
453 let success = bx.from_immediate(success);
455 let dest = result.project_field(bx, 0);
456 bx.store(val, dest.llval, dest.align);
457 let dest = result.project_field(bx, 1);
458 bx.store(success, dest.llval, dest.align);
461 return invalid_monomorphization(ty);
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                                bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
                            return invalid_monomorphization(ty);
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return invalid_monomorphization(ty);
                        bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
                    "singlethreadfence" => {
                        bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
                    // These are all AtomicRMW ops
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                            return invalid_monomorphization(ty);
            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
            sym::ptr_guaranteed_cmp => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.icmp(IntPredicate::IntEQ, a, b)
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
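                    // Illustrative example (made-up addresses): with an 8-byte
                    // pointee, `a == 0x1000` and `b == 0x1018`, the `sub` below
                    // yields -24 and the exact signed division by 8 yields -3.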
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
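                    // Illustrative example (made-up addresses): with an 8-byte
                    // pointee, `a == 0x1018` and `b == 0x1000` (the caller
                    // guarantees the ordering), the unsigned subtraction yields
                    // 24 and the exact unsigned division by 8 yields 3.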
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)

                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
// Returns the width of an int Ty, and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate reusing one of the existing ones.
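// For example: `i32` yields `Some((32, true))`, `u8` yields `Some((8, false))`,
// `isize`/`usize` fall back to the target pointer width, and a non-integer type yields `None`.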
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
// Returns the width of a float Ty.
// Returns None if the type is not a float.
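// For example: `f32` yields `Some(32)` and `f64` yields `Some(64)`; any other type yields `None`.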
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
        ty::Float(t) => Some(t.bit_width()),