// compiler/rustc_codegen_ssa/src/mir/intrinsic.rs

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::call::{FnAbi, PassMode};

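/// Shared lowering for the copy-style intrinsics: emits a `memmove` (when the source and
/// destination may overlap) or a `memcpy` of `count` elements of `ty`, optionally marked
/// volatile.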
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

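/// Shared lowering for the memset-style intrinsics: fills `count` elements of `ty` starting
/// at `dst` with the byte `val`, optionally marked volatile.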
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
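    /// Lowers a call to a compiler intrinsic. Intrinsics that can be expressed with the
    /// backend-agnostic builder methods are handled here; anything else falls through to
    /// the backend-specific `codegen_intrinsic_call` at the bottom of the match.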
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
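            // These intrinsics all have a value that is known at compile time for the
            // monomorphized type, so they are lowered by const-evaluating the instance.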
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
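            // `offset` requires the result to stay within the same allocated object, so it
            // lowers to an `inbounds` GEP.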
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
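            // `arith_offset` (i.e. `wrapping_offset`) has no in-bounds requirement, so it
            // lowers to a plain GEP.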
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                             found `{}`",
                            ret_ty
                        ),
                    );
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // nop at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
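            // For example, `atomic_cxchg_acquire_relaxed` is parsed into the operation
            // `cxchg` with the orderings `acquire` (success) and `relaxed` (failure).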
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().fatal(
                                "Atomic compare-exchange intrinsic missing failure memory ordering",
                            );
                        };
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result =
                                    bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(
                                    bx.backend_type(layout),
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                )
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some()
                            || (ty.is_unsafe_ptr() && op == "xchg")
                        {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
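                    // For example, if `a` is one element below `b`, the unsigned
                    // subtraction wraps, but reinterpreted as signed the difference is
                    // `-pointee_size`, and the exact signed division yields `-1`.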
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an int Ty, and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple copies of this function; investigate reusing one of the
// existing helpers.
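// (`isize` and `usize` have no fixed `bit_width`, hence the fallback to the target's
// pointer width.)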
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}