1 use super::operand::{OperandRef, OperandValue};
2 use super::place::PlaceRef;
4 use crate::common::{span_invalid_monomorphization_error, IntPredicate};
10 use rustc_middle::ty::{self, Ty, TyCtxt};
11 use rustc_span::{sym, Span};
12 use rustc_target::abi::{
13 call::{FnAbi, PassMode},
/// Lowers the `copy`/`copy_nonoverlapping` family of intrinsics: moves
/// `count` values of type `ty` from `src` to `dst` as a bulk byte copy.
/// NOTE(review): the parameter list is elided in this view — presumably
/// `(bx, allow_overlap, volatile, ty, src, dst, count)`; confirm against
/// the full file.
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    // Total byte count = size_of::<ty>() * count, as a target-usize value.
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    // memmove tolerates overlapping source/destination ranges; memcpy is the
    // non-overlapping path. NOTE(review): the `if`/`else` selecting between
    // the two (presumably on `allow_overlap`) is elided here — confirm.
    bx.memmove(dst, align, src, align, size, flags);
    bx.memcpy(dst, align, src, align, size, flags);
/// Lowers the byte-filling intrinsics (`write_bytes`, `volatile_set_memory`):
/// fills `count` values of type `ty` at `dst` with the byte `val` via a
/// single memset.
/// NOTE(review): the parameter list is elided in this view — presumably
/// `(bx, volatile, ty, dst, val, count)`; confirm against the full file.
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    // Total byte count = size_of::<ty>() * count.
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Codegen a call to a compiler intrinsic for `instance`, writing the
    /// result (if any) to `llresult` according to `fn_abi.ret`.
    ///
    /// Intrinsics that can be lowered in a backend-independent way are
    /// handled by the big `match` below; anything not recognized there is
    /// forwarded to the backend-specific `bx.codegen_intrinsic_call`.
    pub fn codegen_intrinsic_call(
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        // Recover the intrinsic's signature from the monomorphized fn item type.
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        let sig = callee_ty.fn_sig(bx.tcx());
        // Late-bound regions are irrelevant at codegen time; erase them.
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        // Intrinsics are dispatched by item name (a `Symbol`).
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();
        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        // View of the out-pointer as a place; used for multi-field results
        // (e.g. the `*_with_overflow` pairs, `atomic_cxchg*`).
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
        // Each arm evaluates to the immediate result value (when the
        // intrinsic produces one); it is stored into `result` after the match.
        let llval = match name {
                bx.assume(args[0].immediate());
            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    // Unsized value: compute the dynamic size from the metadata.
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    // Sized value: size is a compile-time constant.
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    // Unsized value: compute the dynamic alignment from the metadata.
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                // Both values live at fixed slots in the common vtable prefix.
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                if name == sym::vtable_align {
                    // Alignment is always nonzero.
                    bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
            // Compile-time-evaluable intrinsics: run the const evaluator and
            // splat the resulting constant into the return operand.
            | sym::variant_count => {
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // `offset` requires the result to stay in bounds -> inbounds GEP.
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // Wrapping offset: a plain (non-inbounds) GEP.
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            sym::write_bytes => {
            sym::volatile_copy_nonoverlapping_memory => {
            sym::volatile_copy_memory => {
            sym::volatile_set_memory => {
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::exact_div => {
                // All of these are only valid on primitive integer types.
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);
                            // The `(value, overflowed)` pair is stored
                            // field-by-field into the return place.
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);
                            // Signed vs. unsigned exact division.
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                    sym::unchecked_div => {
                            bx.sdiv(args[0].immediate(), args[1].immediate())
                            bx.udiv(args[0].immediate(), args[1].immediate())
                    sym::unchecked_rem => {
                            bx.srem(args[0].immediate(), args[1].immediate())
                            bx.urem(args[0].immediate(), args[1].immediate())
                    sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                    sym::unchecked_shr => {
                            // Arithmetic (sign-propagating) vs. logical shift right.
                            bx.ashr(args[0].immediate(), args[1].immediate())
                            bx.lshr(args[0].immediate(), args[1].immediate())
                    sym::unchecked_add => {
                            bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                    sym::unchecked_sub => {
                            bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                    sym::unchecked_mul => {
                            bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                        // Non-integer monomorphization: report an error and bail.
                        span_invalid_monomorphization_error(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
            // Fast-math float ops: only valid on primitive float types.
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        span_invalid_monomorphization_error(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic float type, found `{}`",
            sym::float_to_int_unchecked => {
                // Validate both sides of the conversion: float in, integer out.
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        "invalid monomorphization of `float_to_int_unchecked` \
                         intrinsic: expected basic float type, \
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        "invalid monomorphization of `float_to_int_unchecked` \
                         intrinsic: expected basic integer type, \
                    // Signed vs. unsigned float-to-int conversion.
                    bx.fptosi(args[0].immediate(), llret_ty)
                    bx.fptoui(args[0].immediate(), llret_ty)
            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            sym::const_deallocate => {
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};
                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                // Maps the ordering suffix of the intrinsic name to the
                // backend-independent `AtomicOrdering` variant.
                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                // Shared error path for atomics instantiated at a type that is
                // neither a primitive integer nor a raw pointer.
                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        "invalid monomorphization of `{}` intrinsic: \
                         expected basic integer type, found `{}`",
                    "cxchg" | "cxchgweak" => {
                        // Compare-exchange carries two orderings: "<success>_<failure>".
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
                            // Unpack `(old value, succeeded)` and store it into
                            // the two fields of the return place.
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return invalid_monomorphization(ty);
                        // Atomic load.
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                                bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
                            return invalid_monomorphization(ty);
                        // Atomic store.
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return invalid_monomorphization(ty);
                        // Cross-thread fence.
                        bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
                    "singlethreadfence" => {
                        bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
                    // These are all AtomicRMW ops
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                            return invalid_monomorphization(ty);
            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                // Plain pointer comparison on the two operands.
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                    bx.icmp(IntPredicate::IntNE, a, b)
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;
                // Subtract as integers, then divide by the pointee size.
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
        // Store the computed value into the return place, honoring an ABI
        // cast on the return type when one is required.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                // Store through a pointer reinterpreted to the cast type.
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
// Returns the width of an int Ty, and if it's signed or not
// Returns None if the type is not an integer
// FIXME: there’s multiple of this functions, investigate using some of the already existing
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    // `bit_width()` is `None` for `isize`/`usize`; fall back to the target's
    // pointer width so those still get a concrete width.
    // Signed integer types (`ty::Int`):
    Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
    // Unsigned integer types (`ty::Uint`):
    Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    // Primitive floats (`f32`/`f64`) report their bit width directly.
    ty::Float(t) => Some(t.bit_width()),