use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};

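// Shared helper for the `copy`-family intrinsics: scales the element size of
// `ty` by `count` and emits a `memmove` when the regions may overlap or a
// `memcpy` when they may not, marking the operation volatile when requested.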
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

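// Shared helper for the `write_bytes`/`volatile_set_memory` intrinsics:
// emits a `memset` covering `count` values of type `ty`.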
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
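    // Lowers intrinsic calls that can be handled in a backend-independent way.
    // Anything not matched below falls through to the backend-specific
    // `codegen_intrinsic_call` in the final arm.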
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

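        // Each arm either evaluates to the intrinsic's return value (stored
        // into `result` after the match) or performs its own stores and
        // returns early.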
        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                if name == sym::vtable_align {
                    // Alignment is always nonzero.
                    bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
                };
                value
            }
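            // These are all compile-time constants for a monomorphic
            // instance, so they can simply be const-evaluated.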
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
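            // Unlike `offset` above, `arith_offset` is allowed to wrap, so it
            // uses a plain `gep` rather than `inbounds_gep`.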
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
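            // Note the argument order: `copy` takes `(src, dst, count)`, so
            // the destination pointer is `args[1]` and the source is `args[0]`.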
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
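            // Integer arithmetic: every intrinsic in this group requires a
            // basic integer type, enforced via `int_type_width_signed`.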
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

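            // `float_to_int_unchecked` assumes the value is in range for the
            // target integer type, so it lowers to a bare `fptosi`/`fptoui`.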
            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                             found `{}`",
                            ret_ty
                        ),
                    );
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // Returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // No-op at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>_<ordering>", where "cxchg"/"cxchgweak" take a pair
            // of success/failure orderings (e.g. "atomic_cxchg_acquire_relaxed").
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().fatal(
                                "Atomic compare-exchange intrinsic missing failure memory ordering",
                            );
                        };
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(
                                    llty,
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                );
                                // ... and then cast the result back to a pointer.
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(
                                    bx.backend_type(layout),
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                )
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return;
                    }

                    // These are all AtomicRMW ops.
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

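            // At runtime a pointer comparison can always be answered exactly,
            // so both intrinsics lower to plain `icmp`s on the pointer values.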
            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (note the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Everything else requires backend-specific handling.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

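        // Store the value produced by the match above into the return place,
        // casting the destination pointer when the ABI demands a cast.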
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an integer `Ty`, and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate reusing
// some of the existing ones.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float `Ty`.
// Returns `None` if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}