1 use super::operand::{OperandRef, OperandValue};
2 use super::place::PlaceRef;
4 use crate::common::{span_invalid_monomorphization_error, IntPredicate};
9 use rustc_middle::ty::{self, Ty, TyCtxt};
10 use rustc_span::{sym, Span};
11 use rustc_target::abi::call::{FnAbi, PassMode};
// Lowers the `copy`/`copy_nonoverlapping` family of intrinsics to a bulk memory
// operation: total byte count = size_of::<T>() * count, at T's ABI alignment.
// NOTE(review): the parameter list and the branch choosing memmove vs. memcpy are
// elided in this excerpt — presumably an overlap flag selects between the two
// calls below; confirm against the full source.
13 fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Query the backend for T's layout; `size` here is the static per-element size.
22 let layout = bx.layout_of(ty);
23 let size = layout.size;
24 let align = layout.align.abi;
// Rebind `size` to the runtime byte count: element size (as a usize const) * count.
25 let size = bx.mul(bx.const_usize(size.bytes()), count);
// Volatile copies must not be elided or reordered by the backend.
26 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
// memmove tolerates overlapping src/dst ranges ...
28 bx.memmove(dst, align, src, align, size, flags);
// ... memcpy requires them to be disjoint.
30 bx.memcpy(dst, align, src, align, size, flags);
// Lowers `write_bytes`/`volatile_set_memory`-style intrinsics to a memset of
// `val` over size_of::<T>() * count bytes at T's ABI alignment.
// NOTE(review): the parameter list is elided in this excerpt — the names used
// below (bx, ty, count, volatile, dst, val) imply its shape; confirm upstream.
34 fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Static per-element size and alignment of T.
42 let layout = bx.layout_of(ty);
43 let size = layout.size;
44 let align = layout.align.abi;
// Runtime total byte count: element size * count.
45 let size = bx.mul(bx.const_usize(size.bytes()), count);
// Volatile stores must not be elided or reordered by the backend.
46 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
47 bx.memset(dst, val, size, align, flags);
// Backend-independent lowering of Rust intrinsics: dispatches on the intrinsic's
// name and emits the corresponding builder (IR) operations, writing the result
// into `llresult` according to the function's ABI.
// NOTE(review): this excerpt elides many lines (some match-arm headers, closing
// braces, parameters such as `bx`, `llresult`, `span`); comments below describe
// only the visible code — confirm elided details against the full source.
50 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
51 pub fn codegen_intrinsic_call(
53 instance: ty::Instance<'tcx>,
54 fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
55 args: &[OperandRef<'tcx, Bx::Value>],
// Recover the monomorphized fn-item type; intrinsics are always FnDef items,
// so anything else is a compiler bug.
59 let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
61 let (def_id, substs) = match *callee_ty.kind() {
62 ty::FnDef(def_id, substs) => (def_id, substs),
63 _ => bug!("expected fn item type, found {}", callee_ty),
// Normalize the signature (erasing late-bound regions) to get concrete
// argument and return types for this monomorphization.
66 let sig = callee_ty.fn_sig(bx.tcx());
67 let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
68 let arg_tys = sig.inputs();
69 let ret_ty = sig.output();
// The intrinsic is identified purely by its item name (a Symbol).
70 let name = bx.tcx().item_name(def_id);
71 let name_str = name.as_str();
73 let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
// `result` is the out-pointer destination, typed by the return layout.
74 let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
// Main dispatch: each arm produces the backend value for the intrinsic's
// result (or stores directly and, presumably in elided code, returns early).
76 let llval = match name {
// `assume`: feed an optimizer hint; produces no meaningful value.
78 bx.assume(args[0].immediate());
86 sym::va_start => bx.va_start(args[0].immediate()),
87 sym::va_end => bx.va_end(args[0].immediate()),
// size_of_val-style arm (arm header elided): for unsized values the operand
// is a (ptr, meta) pair and the size is computed dynamically from the
// metadata; for sized values it is a compile-time constant.
89 let tp_ty = substs.type_at(0);
90 if let OperandValue::Pair(_, meta) = args[0].val {
91 let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
94 bx.const_usize(bx.layout_of(tp_ty).size.bytes())
// Same pattern as above, but for the dynamic alignment of a value.
97 sym::min_align_of_val => {
98 let tp_ty = substs.type_at(0);
99 if let OperandValue::Pair(_, meta) = args[0].val {
100 let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
103 bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
// Const-evaluable intrinsics (e.g. variant_count; other arm names elided):
// evaluate at compile time and materialize the constant result.
110 | sym::variant_count => {
113 .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
115 OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
// Pointer offset arm (header elided — presumably sym::offset): in-bounds GEP,
// so UB if the result leaves the allocation.
118 let ty = substs.type_at(0);
119 let layout = bx.layout_of(ty);
120 let ptr = args[0].immediate();
121 let offset = args[1].immediate();
122 bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
// `arith_offset`: plain (non-inbounds) GEP — wrapping pointer arithmetic.
124 sym::arith_offset => {
125 let ty = substs.type_at(0);
126 let layout = bx.layout_of(ty);
127 let ptr = args[0].immediate();
128 let offset = args[1].immediate();
129 bx.gep(bx.backend_type(layout), ptr, &[offset])
// Bulk-memory intrinsics — bodies elided here; presumably they forward to
// copy_intrinsic / memset_intrinsic defined above.
143 sym::write_bytes => {
155 sym::volatile_copy_nonoverlapping_memory => {
167 sym::volatile_copy_memory => {
179 sym::volatile_set_memory => {
// Volatile scalar stores: dereference the destination operand and store the
// second argument through it with volatile (resp. unaligned) semantics.
190 sym::volatile_store => {
191 let dst = args[0].deref(bx.cx());
192 args[1].val.volatile_store(bx, dst);
195 sym::unaligned_volatile_store => {
196 let dst = args[0].deref(bx.cx());
197 args[1].val.unaligned_volatile_store(bx, dst);
// Integer arithmetic family: only valid on basic integer types; anything
// else is reported as an invalid monomorphization below.
200 sym::add_with_overflow
201 | sym::sub_with_overflow
202 | sym::mul_with_overflow
210 | sym::exact_div => {
212 match int_type_width_signed(ty, bx.tcx()) {
213 Some((_width, signed)) => match name {
214 sym::add_with_overflow
215 | sym::sub_with_overflow
216 | sym::mul_with_overflow => {
217 let op = match name {
218 sym::add_with_overflow => OverflowOp::Add,
219 sym::sub_with_overflow => OverflowOp::Sub,
220 sym::mul_with_overflow => OverflowOp::Mul,
223 let (val, overflow) =
224 bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
225 // Convert `i1` to a `bool`, and write it to the out parameter
226 let val = bx.from_immediate(val);
227 let overflow = bx.from_immediate(overflow);
// Result is the (value, overflowed) pair: store each field of the
// return place separately.
229 let dest = result.project_field(bx, 0);
230 bx.store(val, dest.llval, dest.align);
231 let dest = result.project_field(bx, 1);
232 bx.store(overflow, dest.llval, dest.align);
// The remaining integer ops pick the signed or unsigned builder op based
// on `signed` (the `if signed`/`else` headers are elided in this excerpt).
238 bx.exactsdiv(args[0].immediate(), args[1].immediate())
240 bx.exactudiv(args[0].immediate(), args[1].immediate())
243 sym::unchecked_div => {
245 bx.sdiv(args[0].immediate(), args[1].immediate())
247 bx.udiv(args[0].immediate(), args[1].immediate())
250 sym::unchecked_rem => {
252 bx.srem(args[0].immediate(), args[1].immediate())
254 bx.urem(args[0].immediate(), args[1].immediate())
// Left shift is sign-agnostic; right shift is arithmetic for signed,
// logical for unsigned.
257 sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
258 sym::unchecked_shr => {
260 bx.ashr(args[0].immediate(), args[1].immediate())
262 bx.lshr(args[0].immediate(), args[1].immediate())
265 sym::unchecked_add => {
267 bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
269 bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
272 sym::unchecked_sub => {
274 bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
276 bx.unchecked_usub(args[0].immediate(), args[1].immediate())
279 sym::unchecked_mul => {
281 bx.unchecked_smul(args[0].immediate(), args[1].immediate())
283 bx.unchecked_umul(args[0].immediate(), args[1].immediate())
// Non-integer type: emit a monomorphization error instead of ICEing.
289 span_invalid_monomorphization_error(
293 "invalid monomorphization of `{}` intrinsic: \
294 expected basic integer type, found `{}`",
// Fast-math float ops: only valid on basic float types.
302 sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
303 match float_type_width(arg_tys[0]) {
304 Some(_width) => match name {
305 sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
306 sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
307 sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
308 sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
309 sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
313 span_invalid_monomorphization_error(
317 "invalid monomorphization of `{}` intrinsic: \
318 expected basic float type, found `{}`",
// float->int cast with no saturation/checks: validate both the source float
// type and the destination integer type, then emit fptosi/fptoui.
327 sym::float_to_int_unchecked => {
328 if float_type_width(arg_tys[0]).is_none() {
329 span_invalid_monomorphization_error(
333 "invalid monomorphization of `float_to_int_unchecked` \
334 intrinsic: expected basic float type, \
341 let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) {
344 span_invalid_monomorphization_error(
348 "invalid monomorphization of `float_to_int_unchecked` \
349 intrinsic: expected basic integer type, \
358 bx.fptosi(args[0].immediate(), llret_ty)
360 bx.fptoui(args[0].immediate(), llret_ty)
// Read the enum discriminant of the pointed-to value.
364 sym::discriminant_value => {
365 if ret_ty.is_integral() {
366 args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
368 span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
// const-eval-only allocator intrinsics are inert at runtime.
372 sym::const_allocate => {
373 // returns a null pointer at runtime.
374 bx.const_null(bx.type_i8p())
377 sym::const_deallocate => {
382 // This requires that atomic intrinsics follow a specific naming pattern:
383 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
384 name if name_str.starts_with("atomic_") => {
385 use crate::common::AtomicOrdering::*;
386 use crate::common::{AtomicRmwBinOp, SynchronizationScope};
// Parse "atomic_<op>[_<ordering>[_<failordering>]]" into the success and
// failure orderings; fail-orderings are only meaningful for cxchg variants.
388 let split: Vec<_> = name_str.split('_').collect();
390 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
391 let (order, failorder) = match split.len() {
392 2 => (SequentiallyConsistent, SequentiallyConsistent),
393 3 => match split[2] {
394 "unordered" => (Unordered, Unordered),
395 "relaxed" => (Monotonic, Monotonic),
396 "acq" => (Acquire, Acquire),
// rel/acqrel cannot be used as a failure ordering, so the failure side
// is downgraded (Release -> Monotonic, AcquireRelease -> Acquire).
397 "rel" => (Release, Monotonic),
398 "acqrel" => (AcquireRelease, Acquire),
399 "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
400 "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
401 _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
403 4 => match (split[2], split[3]) {
404 ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
405 ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
406 _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
408 _ => bx.sess().fatal("Atomic intrinsic not in correct format"),
// Shared error path for atomics applied to a non-integer, non-pointer type.
411 let invalid_monomorphization = |ty| {
412 span_invalid_monomorphization_error(
416 "invalid monomorphization of `{}` intrinsic: \
417 expected basic integer type, found `{}`",
// Compare-and-exchange (strong or weak): returns the (old value, success)
// pair through the out-pointer.
424 "cxchg" | "cxchgweak" => {
425 let ty = substs.type_at(0);
426 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
427 let weak = split[1] == "cxchgweak";
428 let mut dst = args[0].immediate();
429 let mut cmp = args[1].immediate();
430 let mut src = args[2].immediate();
431 if ty.is_unsafe_ptr() {
432 // Some platforms do not support atomic operations on pointers,
433 // so we cast to integer first.
434 let ptr_llty = bx.type_ptr_to(bx.type_isize());
435 dst = bx.pointercast(dst, ptr_llty);
436 cmp = bx.ptrtoint(cmp, bx.type_isize());
437 src = bx.ptrtoint(src, bx.type_isize());
439 let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
440 let val = bx.extract_value(pair, 0);
441 let success = bx.extract_value(pair, 1);
442 let val = bx.from_immediate(val);
443 let success = bx.from_immediate(success);
445 let dest = result.project_field(bx, 0);
446 bx.store(val, dest.llval, dest.align);
447 let dest = result.project_field(bx, 1);
448 bx.store(success, dest.llval, dest.align);
451 return invalid_monomorphization(ty);
// Atomic load (arm header elided — presumably "load"): pointer payloads are
// loaded as isize and cast back, for platforms without pointer atomics.
456 let ty = substs.type_at(0);
457 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
458 let layout = bx.layout_of(ty);
459 let size = layout.size;
460 let mut source = args[0].immediate();
461 if ty.is_unsafe_ptr() {
462 // Some platforms do not support atomic operations on pointers,
463 // so we cast to integer first...
464 let llty = bx.type_isize();
465 let ptr_llty = bx.type_ptr_to(llty);
466 source = bx.pointercast(source, ptr_llty);
467 let result = bx.atomic_load(llty, source, order, size);
468 // ... and then cast the result back to a pointer
469 bx.inttoptr(result, bx.backend_type(layout))
471 bx.atomic_load(bx.backend_type(layout), source, order, size)
474 return invalid_monomorphization(ty);
// Atomic store (arm header elided — presumably "store"): same
// pointer-to-integer workaround as the load arm.
479 let ty = substs.type_at(0);
480 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
481 let size = bx.layout_of(ty).size;
482 let mut val = args[1].immediate();
483 let mut ptr = args[0].immediate();
484 if ty.is_unsafe_ptr() {
485 // Some platforms do not support atomic operations on pointers,
486 // so we cast to integer first.
487 let ptr_llty = bx.type_ptr_to(bx.type_isize());
488 ptr = bx.pointercast(ptr, ptr_llty);
489 val = bx.ptrtoint(val, bx.type_isize());
491 bx.atomic_store(val, ptr, order, size);
494 return invalid_monomorphization(ty);
// Fences: cross-thread ("fence") vs. single-thread (compiler-only barrier).
499 bx.atomic_fence(order, SynchronizationScope::CrossThread);
503 "singlethreadfence" => {
504 bx.atomic_fence(order, SynchronizationScope::SingleThread);
508 // These are all AtomicRMW ops
510 let atom_op = match op {
511 "xchg" => AtomicRmwBinOp::AtomicXchg,
512 "xadd" => AtomicRmwBinOp::AtomicAdd,
513 "xsub" => AtomicRmwBinOp::AtomicSub,
514 "and" => AtomicRmwBinOp::AtomicAnd,
515 "nand" => AtomicRmwBinOp::AtomicNand,
516 "or" => AtomicRmwBinOp::AtomicOr,
517 "xor" => AtomicRmwBinOp::AtomicXor,
518 "max" => AtomicRmwBinOp::AtomicMax,
519 "min" => AtomicRmwBinOp::AtomicMin,
520 "umax" => AtomicRmwBinOp::AtomicUMax,
521 "umin" => AtomicRmwBinOp::AtomicUMin,
522 _ => bx.sess().fatal("unknown atomic operation"),
// RMW on pointers is only allowed for plain exchange ("xchg").
525 let ty = substs.type_at(0);
526 if int_type_width_signed(ty, bx.tcx()).is_some()
527 || (ty.is_unsafe_ptr() && op == "xchg")
529 let mut ptr = args[0].immediate();
530 let mut val = args[1].immediate();
531 if ty.is_unsafe_ptr() {
532 // Some platforms do not support atomic operations on pointers,
533 // so we cast to integer first.
534 let ptr_llty = bx.type_ptr_to(bx.type_isize());
535 ptr = bx.pointercast(ptr, ptr_llty);
536 val = bx.ptrtoint(val, bx.type_isize());
538 bx.atomic_rmw(atom_op, ptr, val, order)
540 return invalid_monomorphization(ty);
// Non-temporal (cache-bypassing) store through the dereferenced destination.
546 sym::nontemporal_store => {
547 let dst = args[0].deref(bx.cx());
548 args[1].val.nontemporal_store(bx, dst);
// Conservative pointer comparison used by const-propagation guarantees:
// lowered to a plain icmp at runtime.
552 sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
553 let a = args[0].immediate();
554 let b = args[1].immediate();
555 if name == sym::ptr_guaranteed_eq {
556 bx.icmp(IntPredicate::IntEQ, a, b)
558 bx.icmp(IntPredicate::IntNE, a, b)
562 sym::ptr_offset_from => {
563 let ty = substs.type_at(0);
564 let pointee_size = bx.layout_of(ty).size;
566 // This is the same sequence that Clang emits for pointer subtraction.
567 // It can be neither `nsw` nor `nuw` because the input is treated as
568 // unsigned but then the output is treated as signed, so neither works.
569 let a = args[0].immediate();
570 let b = args[1].immediate();
571 let a = bx.ptrtoint(a, bx.type_isize());
572 let b = bx.ptrtoint(b, bx.type_isize());
573 let d = bx.sub(a, b);
574 let pointee_size = bx.const_usize(pointee_size.bytes());
575 // this is where the signed magic happens (notice the `s` in `exactsdiv`)
576 bx.exactsdiv(d, pointee_size)
// Anything not handled generically is delegated to the backend's own
// intrinsic lowering (same method name on the backend builder).
580 // Need to use backend-specific things in the implementation.
581 bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
// Finally, write `llval` into the return place, honoring a PassMode::Cast
// return by storing through a pointer cast to the cast type.
586 if !fn_abi.ret.is_ignore() {
587 if let PassMode::Cast(ty) = fn_abi.ret.mode {
588 let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
589 let ptr = bx.pointercast(result.llval, ptr_llty);
590 bx.store(llval, ptr, result.align);
592 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
600 // Returns the width of an int Ty, and if it's signed or not
601 // Returns None if the type is not an integer
602 // FIXME: there’s multiple of this functions, investigate using some of the already existing
// NOTE(review): the `match ty.kind()` scaffolding and the non-integer fallback
// arm are elided in this excerpt; the two visible arms presumably correspond to
// ty::Int (signed = true) and ty::Uint (signed = false).
604 fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
// `bit_width()` is None for isize/usize, in which case the target's pointer
// width is used instead.
607 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
610 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
616 // Returns the width of a float Ty
617 // Returns None if the type is not a float
// NOTE(review): the match scaffolding and the `_ => None` fallback arm are
// elided in this excerpt; only the ty::Float arm is visible.
618 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
620 ty::Float(t) => Some(t.bit_width()),