1 use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
2 use crate::builder::Builder;
3 use crate::context::CodegenCx;
4 use crate::llvm;
5 use crate::type_::Type;
6 use crate::type_of::LayoutLlvmExt;
7 use crate::va_arg::emit_va_arg;
8 use crate::value::Value;
9
10 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
11 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
12 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
13 use rustc_codegen_ssa::mir::operand::OperandRef;
14 use rustc_codegen_ssa::mir::place::PlaceRef;
15 use rustc_codegen_ssa::traits::*;
16 use rustc_hir as hir;
17 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
18 use rustc_middle::ty::{self, Ty};
19 use rustc_middle::{bug, span_bug};
20 use rustc_span::{sym, symbol::kw, Span, Symbol};
21 use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
22 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
23
24 use std::cmp::Ordering;
25 use std::iter;
26
27 fn get_simple_intrinsic<'ll>(
28     cx: &CodegenCx<'ll, '_>,
29     name: Symbol,
30 ) -> Option<(&'ll Type, &'ll Value)> {
31     let llvm_name = match name {
32         sym::sqrtf32 => "llvm.sqrt.f32",
33         sym::sqrtf64 => "llvm.sqrt.f64",
34         sym::powif32 => "llvm.powi.f32",
35         sym::powif64 => "llvm.powi.f64",
36         sym::sinf32 => "llvm.sin.f32",
37         sym::sinf64 => "llvm.sin.f64",
38         sym::cosf32 => "llvm.cos.f32",
39         sym::cosf64 => "llvm.cos.f64",
40         sym::powf32 => "llvm.pow.f32",
41         sym::powf64 => "llvm.pow.f64",
42         sym::expf32 => "llvm.exp.f32",
43         sym::expf64 => "llvm.exp.f64",
44         sym::exp2f32 => "llvm.exp2.f32",
45         sym::exp2f64 => "llvm.exp2.f64",
46         sym::logf32 => "llvm.log.f32",
47         sym::logf64 => "llvm.log.f64",
48         sym::log10f32 => "llvm.log10.f32",
49         sym::log10f64 => "llvm.log10.f64",
50         sym::log2f32 => "llvm.log2.f32",
51         sym::log2f64 => "llvm.log2.f64",
52         sym::fmaf32 => "llvm.fma.f32",
53         sym::fmaf64 => "llvm.fma.f64",
54         sym::fabsf32 => "llvm.fabs.f32",
55         sym::fabsf64 => "llvm.fabs.f64",
56         sym::minnumf32 => "llvm.minnum.f32",
57         sym::minnumf64 => "llvm.minnum.f64",
58         sym::maxnumf32 => "llvm.maxnum.f32",
59         sym::maxnumf64 => "llvm.maxnum.f64",
60         sym::copysignf32 => "llvm.copysign.f32",
61         sym::copysignf64 => "llvm.copysign.f64",
62         sym::floorf32 => "llvm.floor.f32",
63         sym::floorf64 => "llvm.floor.f64",
64         sym::ceilf32 => "llvm.ceil.f32",
65         sym::ceilf64 => "llvm.ceil.f64",
66         sym::truncf32 => "llvm.trunc.f32",
67         sym::truncf64 => "llvm.trunc.f64",
68         sym::rintf32 => "llvm.rint.f32",
69         sym::rintf64 => "llvm.rint.f64",
70         sym::nearbyintf32 => "llvm.nearbyint.f32",
71         sym::nearbyintf64 => "llvm.nearbyint.f64",
72         sym::roundf32 => "llvm.round.f32",
73         sym::roundf64 => "llvm.round.f64",
74         sym::ptr_mask => "llvm.ptrmask",
75         _ => return None,
76     };
77     Some(cx.get_intrinsic(llvm_name))
78 }
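// A rough sketch of how this table is used (assuming the usual
// `core::intrinsics` wrappers on the Rust side): a call such as
//
//     let y = unsafe { core::intrinsics::sqrtf64(x) };
//
// resolves here to "llvm.sqrt.f64", and `codegen_intrinsic_call` below then
// emits approximately
//
//     %y = call double @llvm.sqrt.f64(double %x)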
79
80 impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
81     fn codegen_intrinsic_call(
82         &mut self,
83         instance: ty::Instance<'tcx>,
84         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
85         args: &[OperandRef<'tcx, &'ll Value>],
86         llresult: &'ll Value,
87         span: Span,
88     ) {
89         let tcx = self.tcx;
90         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
91
92         let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
93             bug!("expected fn item type, found {}", callee_ty);
94         };
95
96         let sig = callee_ty.fn_sig(tcx);
97         let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
98         let arg_tys = sig.inputs();
99         let ret_ty = sig.output();
100         let name = tcx.item_name(def_id);
101
102         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
103         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
104
105         let simple = get_simple_intrinsic(self, name);
106         let llval = match name {
107             _ if simple.is_some() => {
108                 let (simple_ty, simple_fn) = simple.unwrap();
109                 self.call(
110                     simple_ty,
111                     None,
112                     simple_fn,
113                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
114                     None,
115                 )
116             }
117             sym::likely => {
118                 self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
119             }
120             sym::unlikely => self
121                 .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
122             kw::Try => {
123                 try_intrinsic(
124                     self,
125                     args[0].immediate(),
126                     args[1].immediate(),
127                     args[2].immediate(),
128                     llresult,
129                 );
130                 return;
131             }
132             sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
133             sym::va_copy => {
134                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
135             }
136             sym::va_arg => {
137                 match fn_abi.ret.layout.abi {
138                     abi::Abi::Scalar(scalar) => {
139                         match scalar.primitive() {
140                             Primitive::Int(..) => {
141                                 if self.cx().size_of(ret_ty).bytes() < 4 {
142                                     // `va_arg` should not be called on an integer type
143                                     // less than 4 bytes in length. If it is, promote
144                                     // the integer to an `i32` and truncate the result
145                                     // back to the smaller type.
146                                     let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
147                                     self.trunc(promoted_result, llret_ty)
148                                 } else {
149                                     emit_va_arg(self, args[0], ret_ty)
150                                 }
151                             }
152                             Primitive::F64 | Primitive::Pointer => {
153                                 emit_va_arg(self, args[0], ret_ty)
154                             }
155                             // `va_arg` should never be used with the return type f32.
156                             Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
157                         }
158                     }
159                     _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
160                 }
161             }
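            // A rough sketch of the small-integer promotion above (assumed
            // lowering, not the exact emitted IR): reading a `u8` through
            // `va_arg` becomes an `i32` read followed by a truncation,
            // approximately
            //
            //     %promoted = va_arg ptr %ap, i32
            //     %res      = trunc i32 %promoted to i8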
162
163             sym::volatile_load | sym::unaligned_volatile_load => {
164                 let tp_ty = substs.type_at(0);
165                 let ptr = args[0].immediate();
166                 let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
167                     let llty = ty.llvm_type(self);
168                     let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
169                     self.volatile_load(llty, ptr)
170                 } else {
171                     self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
172                 };
173                 let align = if name == sym::unaligned_volatile_load {
174                     1
175                 } else {
176                     self.align_of(tp_ty).bytes() as u32
177                 };
178                 unsafe {
179                     llvm::LLVMSetAlignment(load, align);
180                 }
181                 self.to_immediate(load, self.layout_of(tp_ty))
182             }
183             sym::volatile_store => {
184                 let dst = args[0].deref(self.cx());
185                 args[1].val.volatile_store(self, dst);
186                 return;
187             }
188             sym::unaligned_volatile_store => {
189                 let dst = args[0].deref(self.cx());
190                 args[1].val.unaligned_volatile_store(self, dst);
191                 return;
192             }
193             sym::prefetch_read_data
194             | sym::prefetch_write_data
195             | sym::prefetch_read_instruction
196             | sym::prefetch_write_instruction => {
197                 let (rw, cache_type) = match name {
198                     sym::prefetch_read_data => (0, 1),
199                     sym::prefetch_write_data => (1, 1),
200                     sym::prefetch_read_instruction => (0, 0),
201                     sym::prefetch_write_instruction => (1, 0),
202                     _ => bug!(),
203                 };
204                 self.call_intrinsic(
205                     "llvm.prefetch",
206                     &[
207                         args[0].immediate(),
208                         self.const_i32(rw),
209                         args[1].immediate(),
210                         self.const_i32(cache_type),
211                     ],
212                 )
213             }
214             sym::ctlz
215             | sym::ctlz_nonzero
216             | sym::cttz
217             | sym::cttz_nonzero
218             | sym::ctpop
219             | sym::bswap
220             | sym::bitreverse
221             | sym::rotate_left
222             | sym::rotate_right
223             | sym::saturating_add
224             | sym::saturating_sub => {
225                 let ty = arg_tys[0];
226                 match int_type_width_signed(ty, self) {
227                     Some((width, signed)) => match name {
228                         sym::ctlz | sym::cttz => {
229                             let y = self.const_bool(false);
230                             self.call_intrinsic(
231                                 &format!("llvm.{}.i{}", name, width),
232                                 &[args[0].immediate(), y],
233                             )
234                         }
235                         sym::ctlz_nonzero => {
236                             let y = self.const_bool(true);
237                             let llvm_name = &format!("llvm.ctlz.i{}", width);
238                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
239                         }
240                         sym::cttz_nonzero => {
241                             let y = self.const_bool(true);
242                             let llvm_name = &format!("llvm.cttz.i{}", width);
243                             self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
244                         }
245                         sym::ctpop => self.call_intrinsic(
246                             &format!("llvm.ctpop.i{}", width),
247                             &[args[0].immediate()],
248                         ),
249                         sym::bswap => {
250                             if width == 8 {
251                                 args[0].immediate() // byte-swapping a u8/i8 is just a no-op
252                             } else {
253                                 self.call_intrinsic(
254                                     &format!("llvm.bswap.i{}", width),
255                                     &[args[0].immediate()],
256                                 )
257                             }
258                         }
259                         sym::bitreverse => self.call_intrinsic(
260                             &format!("llvm.bitreverse.i{}", width),
261                             &[args[0].immediate()],
262                         ),
263                         sym::rotate_left | sym::rotate_right => {
264                             let is_left = name == sym::rotate_left;
265                             let val = args[0].immediate();
266                             let raw_shift = args[1].immediate();
267                             // rotate = funnel shift with first two args the same
268                             let llvm_name =
269                                 &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
270                             self.call_intrinsic(llvm_name, &[val, val, raw_shift])
271                         }
272                         sym::saturating_add | sym::saturating_sub => {
273                             let is_add = name == sym::saturating_add;
274                             let lhs = args[0].immediate();
275                             let rhs = args[1].immediate();
276                             let llvm_name = &format!(
277                                 "llvm.{}{}.sat.i{}",
278                                 if signed { 's' } else { 'u' },
279                                 if is_add { "add" } else { "sub" },
280                                 width
281                             );
282                             self.call_intrinsic(llvm_name, &[lhs, rhs])
283                         }
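                        // For example, following the format string above, a
                        // signed `i32` `saturating_add` lowers to roughly
                        //
                        //     %r = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
                        //
                        // and an unsigned `u16` `saturating_sub` maps to
                        // `llvm.usub.sat.i16`.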
284                         _ => bug!(),
285                     },
286                     None => {
287                         span_invalid_monomorphization_error(
288                             tcx.sess,
289                             span,
290                             &format!(
291                                 "invalid monomorphization of `{}` intrinsic: \
292                                       expected basic integer type, found `{}`",
293                                 name, ty
294                             ),
295                         );
296                         return;
297                     }
298                 }
299             }
300
301             sym::raw_eq => {
302                 use abi::Abi::*;
303                 let tp_ty = substs.type_at(0);
304                 let layout = self.layout_of(tp_ty).layout;
305                 let use_integer_compare = match layout.abi() {
306                     Scalar(_) | ScalarPair(_, _) => true,
307                     Uninhabited | Vector { .. } => false,
308                     Aggregate { .. } => {
309                         // For rusty ABIs, small aggregates are actually passed
310                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
311                         // so we re-use that same threshold here.
312                         layout.size() <= self.data_layout().pointer_size * 2
313                     }
314                 };
315
316                 let a = args[0].immediate();
317                 let b = args[1].immediate();
318                 if layout.size().bytes() == 0 {
319                     self.const_bool(true)
320                 } else if use_integer_compare {
321                     let integer_ty = self.type_ix(layout.size().bits());
322                     let ptr_ty = self.type_ptr_to(integer_ty);
323                     let a_ptr = self.bitcast(a, ptr_ty);
324                     let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
325                     let b_ptr = self.bitcast(b, ptr_ty);
326                     let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
327                     self.icmp(IntPredicate::IntEQ, a_val, b_val)
328                 } else {
329                     let i8p_ty = self.type_i8p();
330                     let a_ptr = self.bitcast(a, i8p_ty);
331                     let b_ptr = self.bitcast(b, i8p_ty);
332                     let n = self.const_usize(layout.size().bytes());
333                     let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
334                     match self.cx.sess().target.arch.as_ref() {
335                         "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
336                         _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
337                     }
338                 }
339             }
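            // A rough picture of the two strategies above (a sketch, not the
            // exact IR): a small `(u32, u32)` pair is compared as one integer,
            //
            //     %a_val = load i64, ptr %a
            //     %b_val = load i64, ptr %b
            //     %eq    = icmp eq i64 %a_val, %b_val
            //
            // while a large `[u8; 256]` falls back to
            //
            //     %cmp = call i32 @memcmp(ptr %a, ptr %b, i64 256)
            //     %eq  = icmp eq i32 %cmp, 0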
340
341             sym::black_box => {
342                 args[0].val.store(self, result);
343                 let result_val_span = [result.llval];
344                 // We need to "use" the argument in some way LLVM can't introspect, and on
345                 // targets that support it we can typically leverage inline assembly to do
346                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
347                 // box. This isn't the greatest implementation since it probably deoptimizes
348                 // more than we want, but it's so far good enough.
349                 //
350                 // For zero-sized types, the location pointed to by the result may be
351                 // uninitialized. Do not "use" the result in this case; instead just clobber
352                 // the memory.
353                 let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
354                     ("~{memory}", &[])
355                 } else {
356                     ("r,~{memory}", &result_val_span)
357                 };
358                 crate::asm::inline_asm_call(
359                     self,
360                     "",
361                     constraint,
362                     inputs,
363                     self.type_void(),
364                     true,
365                     false,
366                     llvm::AsmDialect::Att,
367                     &[span],
368                     false,
369                     None,
370                 )
371                 .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
372
373                 // We have copied the value to `result` already.
374                 return;
375             }
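            // What the empty asm amounts to, roughly (a sketch of the
            // expected IR for a non-ZST argument on a register target):
            //
            //     call void asm sideeffect "", "r,~{memory}"(ptr %result)
            //
            // i.e. an opaque use of the pointer plus a memory clobber, which
            // is what keeps LLVM from reasoning about the value.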
376
377             _ if name.as_str().starts_with("simd_") => {
378                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
379                     Ok(llval) => llval,
380                     Err(()) => return,
381                 }
382             }
383
384             _ => bug!("unknown intrinsic '{}'", name),
385         };
386
387         if !fn_abi.ret.is_ignore() {
388             if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
389                 let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
390                 let ptr = self.pointercast(result.llval, ptr_llty);
391                 self.store(llval, ptr, result.align);
392             } else {
393                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
394                     .val
395                     .store(self, result);
396             }
397         }
398     }
399
400     fn abort(&mut self) {
401         self.call_intrinsic("llvm.trap", &[]);
402     }
403
404     fn assume(&mut self, val: Self::Value) {
405         self.call_intrinsic("llvm.assume", &[val]);
406     }
407
408     fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
409         self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
410     }
411
412     fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
413         // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
414         // optimization pass replaces calls to this intrinsic with code to test type membership.
415         let i8p_ty = self.type_i8p();
416         let bitcast = self.bitcast(pointer, i8p_ty);
417         self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
418     }
419
420     fn type_checked_load(
421         &mut self,
422         llvtable: &'ll Value,
423         vtable_byte_offset: u64,
424         typeid: &'ll Value,
425     ) -> Self::Value {
426         let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
427         let type_checked_load =
428             self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
429         self.extract_value(type_checked_load, 0)
430     }
431
432     fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
433         self.call_intrinsic("llvm.va_start", &[va_list])
434     }
435
436     fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
437         self.call_intrinsic("llvm.va_end", &[va_list])
438     }
439 }
440
441 fn try_intrinsic<'ll>(
442     bx: &mut Builder<'_, 'll, '_>,
443     try_func: &'ll Value,
444     data: &'ll Value,
445     catch_func: &'ll Value,
446     dest: &'ll Value,
447 ) {
448     if bx.sess().panic_strategy() == PanicStrategy::Abort {
449         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
450         bx.call(try_func_ty, None, try_func, &[data], None);
451         // Return 0 unconditionally from the intrinsic call;
452         // we can never unwind.
453         let ret_align = bx.tcx().data_layout.i32_align.abi;
454         bx.store(bx.const_i32(0), dest, ret_align);
455     } else if wants_msvc_seh(bx.sess()) {
456         codegen_msvc_try(bx, try_func, data, catch_func, dest);
457     } else if bx.sess().target.os == "emscripten" {
458         codegen_emcc_try(bx, try_func, data, catch_func, dest);
459     } else {
460         codegen_gnu_try(bx, try_func, data, catch_func, dest);
461     }
462 }
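// A minimal sketch of the `panic=abort` branch above (an assumed equivalent
// in Rust-like pseudocode, not the emitted IR): the catch machinery is
// skipped entirely and the intrinsic degenerates to
//
//     try_func(data);
//     *dest = 0; // "no panic was caught"
//
// which is sound because, with the abort strategy, unwinding can never reach
// this point.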
463
464 // MSVC's definition of the `rust_try` function.
465 //
466 // This implementation uses the newer exception handling instructions in LLVM
467 // that have support for SEH on MSVC targets. Although these
468 // instructions are meant to work for all targets, as of this
469 // writing LLVM does not recommend using them, as the old
470 // ones are still better optimized.
471 fn codegen_msvc_try<'ll>(
472     bx: &mut Builder<'_, 'll, '_>,
473     try_func: &'ll Value,
474     data: &'ll Value,
475     catch_func: &'ll Value,
476     dest: &'ll Value,
477 ) {
478     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
479         bx.set_personality_fn(bx.eh_personality());
480
481         let normal = bx.append_sibling_block("normal");
482         let catchswitch = bx.append_sibling_block("catchswitch");
483         let catchpad_rust = bx.append_sibling_block("catchpad_rust");
484         let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
485         let caught = bx.append_sibling_block("caught");
486
487         let try_func = llvm::get_param(bx.llfn(), 0);
488         let data = llvm::get_param(bx.llfn(), 1);
489         let catch_func = llvm::get_param(bx.llfn(), 2);
490
491         // We're generating an IR snippet that looks like:
492         //
493         //   declare i32 @rust_try(%try_func, %data, %catch_func) {
494         //      %slot = alloca i8*
495         //      invoke %try_func(%data) to label %normal unwind label %catchswitch
496         //
497         //   normal:
498         //      ret i32 0
499         //
500         //   catchswitch:
501         //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
502         //
503         //   catchpad_rust:
504         //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
505         //      %ptr = load %slot
506         //      call %catch_func(%data, %ptr)
507         //      catchret from %tok to label %caught
508         //
509         //   catchpad_foreign:
510         //      %tok = catchpad within %cs [null, 64, null]
511         //      call %catch_func(%data, null)
512         //      catchret from %tok to label %caught
513         //
514         //   caught:
515         //      ret i32 1
516         //   }
517         //
518         // This structure follows the basic usage of throw/try/catch in LLVM.
519         // For example, compile this C++ snippet to see what LLVM generates:
520         //
521         //      struct rust_panic {
522         //          rust_panic(const rust_panic&);
523         //          ~rust_panic();
524         //
525         //          void* x[2];
526         //      };
527         //
528         //      int __rust_try(
529         //          void (*try_func)(void*),
530         //          void *data,
531         //          void (*catch_func)(void*, void*) noexcept
532         //      ) {
533         //          try {
534         //              try_func(data);
535         //              return 0;
536         //          } catch(rust_panic& a) {
537         //              catch_func(data, &a);
538         //              return 1;
539         //          } catch(...) {
540         //              catch_func(data, NULL);
541         //              return 1;
542         //          }
543         //      }
544         //
545         // More information can be found in libstd's seh.rs implementation.
546         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
547         let slot = bx.alloca(bx.type_i8p(), ptr_align);
548         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
549         bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None);
550
551         bx.switch_to_block(normal);
552         bx.ret(bx.const_i32(0));
553
554         bx.switch_to_block(catchswitch);
555         let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
556
557         // We can't use the TypeDescriptor defined in libpanic_unwind because it
558         // might be in another DLL and the SEH encoding only supports specifying
559         // a TypeDescriptor from the current module.
560         //
561         // However this isn't an issue since the MSVC runtime uses string
562         // comparison on the type name to match TypeDescriptors rather than
563         // pointer equality.
564         //
565         // So instead we generate a new TypeDescriptor in each module that uses
566         // `try` and let the linker merge duplicate definitions in the same
567         // module.
568         //
569         // When modifying, make sure that the type_name string exactly matches
570         // the one used in src/libpanic_unwind/seh.rs.
571         let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
572         let type_name = bx.const_bytes(b"rust_panic\0");
573         let type_info =
574             bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
575         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
576         unsafe {
577             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
578             llvm::SetUniqueComdat(bx.llmod, tydesc);
579             llvm::LLVMSetInitializer(tydesc, type_info);
580         }
581
582         // The flag value of 8 indicates that we are catching the exception by
583         // reference instead of by value. We can't use catch by value because
584         // that requires copying the exception object, which we don't support
585         // since our exception object effectively contains a Box.
586         //
587         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
588         bx.switch_to_block(catchpad_rust);
589         let flags = bx.const_i32(8);
590         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
591         let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
592         let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
593         bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet));
594         bx.catch_ret(&funclet, caught);
595
596         // The flag value of 64 indicates a "catch-all".
597         bx.switch_to_block(catchpad_foreign);
598         let flags = bx.const_i32(64);
599         let null = bx.const_null(bx.type_i8p());
600         let funclet = bx.catch_pad(cs, &[null, flags, null]);
601         bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet));
602         bx.catch_ret(&funclet, caught);
603
604         bx.switch_to_block(caught);
605         bx.ret(bx.const_i32(1));
606     });
607
608     // Note that no invoke is used here because by definition this function
609     // can't panic (that's what it's catching).
610     let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
611     let i32_align = bx.tcx().data_layout.i32_align.abi;
612     bx.store(ret, dest, i32_align);
613 }
614
615 // Definition of the standard `try` function for Rust using the GNU-like model
616 // of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
617 // instructions).
618 //
619 // This codegen is a little surprising because we always call a shim
620 // function instead of inlining the call to `invoke` manually here. This is done
621 // because in LLVM we're only allowed to have one personality per function
622 // definition. The call to the `try` intrinsic is being inlined into the
623 // function calling it, and that function may already have other personality
624 // functions in play. By calling a shim we're guaranteed that our shim will have
625 // the right personality function.
626 fn codegen_gnu_try<'ll>(
627     bx: &mut Builder<'_, 'll, '_>,
628     try_func: &'ll Value,
629     data: &'ll Value,
630     catch_func: &'ll Value,
631     dest: &'ll Value,
632 ) {
633     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
634         // Codegens the shims described above:
635         //
636         //   bx:
637         //      invoke %try_func(%data) normal %normal unwind %catch
638         //
639         //   normal:
640         //      ret 0
641         //
642         //   catch:
643         //      (%ptr, _) = landingpad
644         //      call %catch_func(%data, %ptr)
645         //      ret 1
646         let then = bx.append_sibling_block("then");
647         let catch = bx.append_sibling_block("catch");
648
649         let try_func = llvm::get_param(bx.llfn(), 0);
650         let data = llvm::get_param(bx.llfn(), 1);
651         let catch_func = llvm::get_param(bx.llfn(), 2);
652         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
653         bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
654
655         bx.switch_to_block(then);
656         bx.ret(bx.const_i32(0));
657
658         // Type indicator for the exception being thrown.
659         //
660         // The first value in this tuple is a pointer to the exception object
661         // being thrown.  The second value is a "selector" indicating which of
662         // the landing pad clauses the exception's type had been matched to.
663         // rust_try ignores the selector.
664         bx.switch_to_block(catch);
665         let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
666         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
667         let tydesc = bx.const_null(bx.type_i8p());
668         bx.add_clause(vals, tydesc);
669         let ptr = bx.extract_value(vals, 0);
670         let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
671         bx.call(catch_ty, None, catch_func, &[data, ptr], None);
672         bx.ret(bx.const_i32(1));
673     });
674
675     // Note that no invoke is used here because by definition this function
676     // can't panic (that's what it's catching).
677     let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
678     let i32_align = bx.tcx().data_layout.i32_align.abi;
679     bx.store(ret, dest, i32_align);
680 }
681
682 // Variant of codegen_gnu_try used for emscripten where Rust panics are
683 // implemented using C++ exceptions. Here we use exceptions of a specific type
684 // (`struct rust_panic`) to represent Rust panics.
685 fn codegen_emcc_try<'ll>(
686     bx: &mut Builder<'_, 'll, '_>,
687     try_func: &'ll Value,
688     data: &'ll Value,
689     catch_func: &'ll Value,
690     dest: &'ll Value,
691 ) {
692     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
693         // Codegens the shims described above:
694         //
695         //   bx:
696         //      invoke %try_func(%data) normal %normal unwind %catch
697         //
698         //   normal:
699         //      ret 0
700         //
701         //   catch:
702         //      (%ptr, %selector) = landingpad
703         //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
704         //      %is_rust_panic = %selector == %rust_typeid
705         //      %catch_data = alloca { i8*, i8 }
706         //      %catch_data[0] = %ptr
707         //      %catch_data[1] = %is_rust_panic
708         //      call %catch_func(%data, %catch_data)
709         //      ret 1
710         let then = bx.append_sibling_block("then");
711         let catch = bx.append_sibling_block("catch");
712
713         let try_func = llvm::get_param(bx.llfn(), 0);
714         let data = llvm::get_param(bx.llfn(), 1);
715         let catch_func = llvm::get_param(bx.llfn(), 2);
716         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
717         bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
718
719         bx.switch_to_block(then);
720         bx.ret(bx.const_i32(0));
721
722         // Type indicator for the exception being thrown.
723         //
724         // The first value in this tuple is a pointer to the exception object
725         // being thrown.  The second value is a "selector" indicating which of
726         // the landing pad clauses the exception's type had been matched to.
727         bx.switch_to_block(catch);
728         let tydesc = bx.eh_catch_typeinfo();
729         let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
730         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
731         bx.add_clause(vals, tydesc);
732         bx.add_clause(vals, bx.const_null(bx.type_i8p()));
733         let ptr = bx.extract_value(vals, 0);
734         let selector = bx.extract_value(vals, 1);
735
736         // Check if the typeid we got is the one for a Rust panic.
737         let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
738         let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
739         let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
740
741         // We need to pass two values to catch_func (ptr and is_rust_panic), so
742         // create an alloca and pass a pointer to that.
743         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
744         let i8_align = bx.tcx().data_layout.i8_align.abi;
745         let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
746         let catch_data = bx.alloca(catch_data_type, ptr_align);
747         let catch_data_0 =
748             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
749         bx.store(ptr, catch_data_0, ptr_align);
750         let catch_data_1 =
751             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
752         bx.store(is_rust_panic, catch_data_1, i8_align);
753         let catch_data = bx.bitcast(catch_data, bx.type_i8p());
754
755         let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
756         bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
757         bx.ret(bx.const_i32(1));
758     });
759
760     // Note that no invoke is used here because by definition this function
761     // can't panic (that's what it's catching).
762     let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
763     let i32_align = bx.tcx().data_layout.i32_align.abi;
764     bx.store(ret, dest, i32_align);
765 }
766
767 // Helper function to give a Block to a closure to codegen a shim function.
768 // This is currently primarily used for the `try` intrinsic functions above.
769 fn gen_fn<'ll, 'tcx>(
770     cx: &CodegenCx<'ll, 'tcx>,
771     name: &str,
772     rust_fn_sig: ty::PolyFnSig<'tcx>,
773     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
774 ) -> (&'ll Type, &'ll Value) {
775     let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
776     let llty = fn_abi.llvm_type(cx);
777     let llfn = cx.declare_fn(name, fn_abi);
778     cx.set_frame_pointer_type(llfn);
779     cx.apply_target_cpu_attr(llfn);
780     // FIXME(eddyb) find a nicer way to do this.
781     unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
782     let llbb = Builder::append_block(cx, llfn, "entry-block");
783     let bx = Builder::build(cx, llbb);
784     codegen(bx);
785     (llty, llfn)
786 }
787
788 // Helper function used to get a handle to the `__rust_try` function used to
789 // catch exceptions.
790 //
791 // This function is only generated once and is then cached.
792 fn get_rust_try_fn<'ll, 'tcx>(
793     cx: &CodegenCx<'ll, 'tcx>,
794     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
795 ) -> (&'ll Type, &'ll Value) {
796     if let Some(llfn) = cx.rust_try_fn.get() {
797         return llfn;
798     }
799
800     // Define the type up front for the signature of the rust_try function.
801     let tcx = cx.tcx;
802     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
803     // `unsafe fn(*mut i8) -> ()`
804     let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
805         iter::once(i8p),
806         tcx.mk_unit(),
807         false,
808         hir::Unsafety::Unsafe,
809         Abi::Rust,
810     )));
811     // `unsafe fn(*mut i8, *mut i8) -> ()`
812     let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
813         [i8p, i8p].iter().cloned(),
814         tcx.mk_unit(),
815         false,
816         hir::Unsafety::Unsafe,
817         Abi::Rust,
818     )));
819     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
820     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
821         [try_fn_ty, i8p, catch_fn_ty].into_iter(),
822         tcx.types.i32,
823         false,
824         hir::Unsafety::Unsafe,
825         Abi::Rust,
826     ));
827     let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
828     cx.rust_try_fn.set(Some(rust_try));
829     rust_try
830 }
831
832 fn generic_simd_intrinsic<'ll, 'tcx>(
833     bx: &mut Builder<'_, 'll, 'tcx>,
834     name: Symbol,
835     callee_ty: Ty<'tcx>,
836     args: &[OperandRef<'tcx, &'ll Value>],
837     ret_ty: Ty<'tcx>,
838     llret_ty: &'ll Type,
839     span: Span,
840 ) -> Result<&'ll Value, ()> {
841     // macros for error handling:
842     #[allow(unused_macro_rules)]
843     macro_rules! emit_error {
844         ($msg: tt) => {
845             emit_error!($msg, )
846         };
847         ($msg: tt, $($fmt: tt)*) => {
848             span_invalid_monomorphization_error(
849                 bx.sess(), span,
850                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
851                          name, $($fmt)*));
852         }
853     }
854
855     macro_rules! return_error {
856         ($($fmt: tt)*) => {
857             {
858                 emit_error!($($fmt)*);
859                 return Err(());
860             }
861         }
862     }
863
864     macro_rules! require {
865         ($cond: expr, $($fmt: tt)*) => {
866             if !$cond {
867                 return_error!($($fmt)*);
868             }
869         };
870     }
871
872     macro_rules! require_simd {
873         ($ty: expr, $position: expr) => {
874             require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
875         };
876     }
877
878     let tcx = bx.tcx();
879     let sig =
880         tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
881     let arg_tys = sig.inputs();
882
883     if name == sym::simd_select_bitmask {
884         require_simd!(arg_tys[1], "argument");
885         let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
886
887         let expected_int_bits = (len.max(8) - 1).next_power_of_two();
888         let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
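        // Worked example (assuming the usual power-of-two lane counts): a
        // 4-lane mask expects a `u8` (or `[u8; 1]`) bitmask argument, while a
        // 32-lane mask expects a `u32` (or `[u8; 4]`).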
889
890         let mask_ty = arg_tys[0];
891         let mask = match mask_ty.kind() {
892             ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
893             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
894             ty::Array(elem, len)
895                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
896                     && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
897                         == Some(expected_bytes) =>
898             {
899                 let place = PlaceRef::alloca(bx, args[0].layout);
900                 args[0].val.store(bx, place);
901                 let int_ty = bx.type_ix(expected_bytes * 8);
902                 let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
903                 bx.load(int_ty, ptr, Align::ONE)
904             }
905             _ => return_error!(
906                 "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
907                 mask_ty,
908                 expected_int_bits,
909                 expected_bytes
910             ),
911         };
912
913         let i1 = bx.type_i1();
914         let im = bx.type_ix(len);
915         let i1xn = bx.type_vector(i1, len);
916         let m_im = bx.trunc(mask, im);
917         let m_i1s = bx.bitcast(m_im, i1xn);
918         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
919     }
920
921     // every intrinsic below takes a SIMD vector as its first argument
922     require_simd!(arg_tys[0], "input");
923     let in_ty = arg_tys[0];
924
925     let comparison = match name {
926         sym::simd_eq => Some(hir::BinOpKind::Eq),
927         sym::simd_ne => Some(hir::BinOpKind::Ne),
928         sym::simd_lt => Some(hir::BinOpKind::Lt),
929         sym::simd_le => Some(hir::BinOpKind::Le),
930         sym::simd_gt => Some(hir::BinOpKind::Gt),
931         sym::simd_ge => Some(hir::BinOpKind::Ge),
932         _ => None,
933     };
934
935     let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
936     if let Some(cmp_op) = comparison {
937         require_simd!(ret_ty, "return");
938
939         let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
940         require!(
941             in_len == out_len,
942             "expected return type with length {} (same as input type `{}`), \
943              found `{}` with length {}",
944             in_len,
945             in_ty,
946             ret_ty,
947             out_len
948         );
949         require!(
950             bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
951             "expected return type with integer elements, found `{}` with non-integer `{}`",
952             ret_ty,
953             out_ty
954         );
955
956         return Ok(compare_simd_types(
957             bx,
958             args[0].immediate(),
959             args[1].immediate(),
960             in_elem,
961             llret_ty,
962             cmp_op,
963         ));
964     }
965
966     if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
967         // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
968         // If there is no suffix, use the index array length.
969         let n: u64 = if stripped.is_empty() {
970             // Make sure this is actually an array, since typeck only checks the length-suffixed
971             // version of this intrinsic.
972             match args[2].layout.ty.kind() {
973                 ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
974                     len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
975                         span_bug!(span, "could not evaluate shuffle index array length")
976                     })
977                 }
978                 _ => return_error!(
979                     "simd_shuffle index must be an array of `u32`, got `{}`",
980                     args[2].layout.ty
981                 ),
982             }
983         } else {
984             stripped.parse().unwrap_or_else(|_| {
985                 span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
986             })
987         };
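        // For example (assumed call shapes): the older `simd_shuffle4` form
        // takes `n = 4` from the name's suffix, while the plain `simd_shuffle`
        // form reads `n` from the length of its `[u32; N]` index-array
        // argument.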
988
989         require_simd!(ret_ty, "return");
990         let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
991         require!(
992             out_len == n,
993             "expected return type of length {}, found `{}` with length {}",
994             n,
995             ret_ty,
996             out_len
997         );
998         require!(
999             in_elem == out_ty,
1000             "expected return element type `{}` (element of input `{}`), \
1001              found `{}` with element type `{}`",
1002             in_elem,
1003             in_ty,
1004             ret_ty,
1005             out_ty
1006         );
1007
1008         let total_len = u128::from(in_len) * 2;
1009
1010         let vector = args[2].immediate();
1011
1012         let indices: Option<Vec<_>> = (0..n)
1013             .map(|i| {
1014                 let arg_idx = i;
1015                 let val = bx.const_get_elt(vector, i as u64);
1016                 match bx.const_to_opt_u128(val, true) {
1017                     None => {
1018                         emit_error!("shuffle index #{} is not a constant", arg_idx);
1019                         None
1020                     }
1021                     Some(idx) if idx >= total_len => {
1022                         emit_error!(
1023                             "shuffle index #{} is out of bounds (limit {})",
1024                             arg_idx,
1025                             total_len
1026                         );
1027                         None
1028                     }
1029                     Some(idx) => Some(bx.const_i32(idx as i32)),
1030                 }
1031             })
1032             .collect();
1033         let Some(indices) = indices else {
1034             return Ok(bx.const_null(llret_ty));
1035         };
1036
1037         return Ok(bx.shuffle_vector(
1038             args[0].immediate(),
1039             args[1].immediate(),
1040             bx.const_vector(&indices),
1041         ));
1042     }
1043
1044     if name == sym::simd_insert {
1045         require!(
1046             in_elem == arg_tys[2],
1047             "expected inserted type `{}` (element of input `{}`), found `{}`",
1048             in_elem,
1049             in_ty,
1050             arg_tys[2]
1051         );
1052         return Ok(bx.insert_element(
1053             args[0].immediate(),
1054             args[2].immediate(),
1055             args[1].immediate(),
1056         ));
1057     }
1058     if name == sym::simd_extract {
1059         require!(
1060             ret_ty == in_elem,
1061             "expected return type `{}` (element of input `{}`), found `{}`",
1062             in_elem,
1063             in_ty,
1064             ret_ty
1065         );
1066         return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
1067     }
1068
1069     if name == sym::simd_select {
1070         let m_elem_ty = in_elem;
1071         let m_len = in_len;
1072         require_simd!(arg_tys[1], "argument");
1073         let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
1074         require!(
1075             m_len == v_len,
1076             "mismatched lengths: mask length `{}` != other vector length `{}`",
1077             m_len,
1078             v_len
1079         );
1080         match m_elem_ty.kind() {
1081             ty::Int(_) => {}
1082             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
1083         }
1084         // truncate the mask to a vector of i1s
1085         let i1 = bx.type_i1();
1086         let i1xn = bx.type_vector(i1, m_len as u64);
1087         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1088         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1089     }
1090
1091     if name == sym::simd_bitmask {
1092         // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
1093         // vector mask and returns the most significant bit (MSB) of each lane in the form
1094         // of either:
1095         // * an unsigned integer
1096         // * an array of `u8`
1097         // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
1098         //
1099         // The bit order of the result depends on the byte endianness, LSB-first for little
1100         // endian and MSB-first for big endian.
1101         let expected_int_bits = in_len.max(8);
1102         let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
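        // Worked example (assuming power-of-two lane counts): a 4-lane mask
        // yields `expected_int_bits = 8` and `expected_bytes = 1` (a `u8` with
        // four meaningful bits), while a 16-lane mask yields 16 bits / 2
        // bytes.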
1103
1104         // Integer vector <i{in_bitwidth} x in_len>:
1105         let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
1106             ty::Int(i) => (
1107                 args[0].immediate(),
1108                 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
1109             ),
1110             ty::Uint(i) => (
1111                 args[0].immediate(),
1112                 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
1113             ),
1114             _ => return_error!(
1115                 "vector argument `{}`'s element type `{}`, expected integer element type",
1116                 in_ty,
1117                 in_elem
1118             ),
1119         };
1120
1121         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
1122         let shift_indices =
1123             vec![
1124                 bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1125                 in_len as _
1126             ];
1127         let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1128         // Truncate vector to an <i1 x N>
1129         let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
1130         // Bitcast <i1 x N> to iN:
1131         let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1132
1133         match ret_ty.kind() {
1134             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
1135                 // Zero-extend iN to the bitmask type:
1136                 return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1137             }
1138             ty::Array(elem, len)
1139                 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1140                     && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
1141                         == Some(expected_bytes) =>
1142             {
1143                 // Zero-extend iN to the array length:
1144                 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
1145
1146                 // Convert the integer to a byte array
1147                 let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
1148                 bx.store(ze, ptr, Align::ONE);
1149                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
1150                 let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
1151                 return Ok(bx.load(array_ty, ptr, Align::ONE));
1152             }
1153             _ => return_error!(
1154                 "cannot return `{}`, expected `u{}` or `[u8; {}]`",
1155                 ret_ty,
1156                 expected_int_bits,
1157                 expected_bytes
1158             ),
1159         }
1160     }
1161
1162     fn simd_simple_float_intrinsic<'ll, 'tcx>(
1163         name: Symbol,
1164         in_elem: Ty<'_>,
1165         in_ty: Ty<'_>,
1166         in_len: u64,
1167         bx: &mut Builder<'_, 'll, 'tcx>,
1168         span: Span,
1169         args: &[OperandRef<'tcx, &'ll Value>],
1170     ) -> Result<&'ll Value, ()> {
1171         #[allow(unused_macro_rules)]
1172         macro_rules! emit_error {
1173             ($msg: tt) => {
1174                 emit_error!($msg, )
1175             };
1176             ($msg: tt, $($fmt: tt)*) => {
1177                 span_invalid_monomorphization_error(
1178                     bx.sess(), span,
1179                     &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
1180                              name, $($fmt)*));
1181             }
1182         }
1183         macro_rules! return_error {
1184             ($($fmt: tt)*) => {
1185                 {
1186                     emit_error!($($fmt)*);
1187                     return Err(());
1188                 }
1189             }
1190         }
1191
1192         let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
1193             let elem_ty = bx.cx.type_float_from_ty(*f);
1194             match f.bit_width() {
1195                 32 => ("f32", elem_ty),
1196                 64 => ("f64", elem_ty),
1197                 _ => {
1198                     return_error!(
1199                         "unsupported element type `{}` of floating-point vector `{}`",
1200                         f.name_str(),
1201                         in_ty
1202                     );
1203                 }
1204             }
1205         } else {
1206             return_error!("`{}` is not a floating-point type", in_ty);
1207         };
1208
1209         let vec_ty = bx.type_vector(elem_ty, in_len);
1210
1211         let (intr_name, fn_ty) = match name {
1212             sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
1213             sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
1214             sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
1215             sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
1216             sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
1217             sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
1218             sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
1219             sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
1220             sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
1221             sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
1222             sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
1223             sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
1224             sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
1225             sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
1226             sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
1227             sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
1228             _ => return_error!("unrecognized intrinsic `{}`", name),
1229         };
1230         let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
1231         let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
1232         let c = bx.call(
1233             fn_ty,
1234             None,
1235             f,
1236             &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
1237             None,
1238         );
1239         Ok(c)
1240     }
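    // For example, following the `llvm.{name}.v{len}{ty}` scheme above,
    // `simd_fsqrt` on a 4-lane `f32` vector is assumed to call
    //
    //     declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
    //
    // and `simd_fma` on a 2-lane `f64` vector maps to `llvm.fma.v2f64`.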
1241
1242     if std::matches!(
1243         name,
1244         sym::simd_ceil
1245             | sym::simd_fabs
1246             | sym::simd_fcos
1247             | sym::simd_fexp2
1248             | sym::simd_fexp
1249             | sym::simd_flog10
1250             | sym::simd_flog2
1251             | sym::simd_flog
1252             | sym::simd_floor
1253             | sym::simd_fma
1254             | sym::simd_fpow
1255             | sym::simd_fpowi
1256             | sym::simd_fsin
1257             | sym::simd_fsqrt
1258             | sym::simd_round
1259             | sym::simd_trunc
1260     ) {
1261         return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
1262     }
1263
1264     // FIXME: use:
1265     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1266     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
1267     fn llvm_vector_str(
1268         elem_ty: Ty<'_>,
1269         vec_len: u64,
1270         no_pointers: usize,
1271         bx: &Builder<'_, '_, '_>,
1272     ) -> String {
1273         let p0s: String = "p0".repeat(no_pointers);
1274         match *elem_ty.kind() {
1275             ty::Int(v) => format!(
1276                 "v{}{}i{}",
1277                 vec_len,
1278                 p0s,
1279                 // Normalize to prevent crash if v: IntTy::Isize
1280                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1281             ),
1282             ty::Uint(v) => format!(
1283                 "v{}{}i{}",
1284                 vec_len,
1285                 p0s,
1286                 // Normalize to prevent crash if v: UintTy::Usize
1287                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
1288             ),
1289             ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1290             _ => unreachable!(),
1291         }
1292     }
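     // Examples of the mangling produced above: `i32` lanes with `vec_len` = 4 and no
     // pointer indirection give "v4i32"; with one level of indirection
     // (`*const i32`/`*mut i32`) they give "v4p0i32".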
1293
1294     fn llvm_vector_ty<'ll>(
1295         cx: &CodegenCx<'ll, '_>,
1296         elem_ty: Ty<'_>,
1297         vec_len: u64,
1298         mut no_pointers: usize,
1299     ) -> &'ll Type {
1300         // FIXME: use cx.layout_of(ty).llvm_type() ?
1301         let mut elem_ty = match *elem_ty.kind() {
1302             ty::Int(v) => cx.type_int_from_ty(v),
1303             ty::Uint(v) => cx.type_uint_from_ty(v),
1304             ty::Float(v) => cx.type_float_from_ty(v),
1305             _ => unreachable!(),
1306         };
1307         while no_pointers > 0 {
1308             elem_ty = cx.type_ptr_to(elem_ty);
1309             no_pointers -= 1;
1310         }
1311         cx.type_vector(elem_ty, vec_len)
1312     }
1313
1314     if name == sym::simd_gather {
1315         // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1316         //             mask: <N x i{M}>) -> <N x T>
1317         // * N: number of elements in the input vectors
1318         // * T: type of the element to load
1319         // * M: any integer width is supported, will be truncated to i1
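             //
             // For example (a sketch): gathering four `f32` lanes through a vector of
             // `*const f32` with an `i32` mask lowers to
             // `llvm.masked.gather.v4f32.v4p0f32`, with the mask truncated to `<4 x i1>`.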
1320
1321         // All types must be simd vector types
1322         require_simd!(in_ty, "first");
1323         require_simd!(arg_tys[1], "second");
1324         require_simd!(arg_tys[2], "third");
1325         require_simd!(ret_ty, "return");
1326
1327         // Of the same length:
1328         let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
1329         let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
1330         require!(
1331             in_len == out_len,
1332             "expected {} argument with length {} (same as input type `{}`), \
1333              found `{}` with length {}",
1334             "second",
1335             in_len,
1336             in_ty,
1337             arg_tys[1],
1338             out_len
1339         );
1340         require!(
1341             in_len == out_len2,
1342             "expected {} argument with length {} (same as input type `{}`), \
1343              found `{}` with length {}",
1344             "third",
1345             in_len,
1346             in_ty,
1347             arg_tys[2],
1348             out_len2
1349         );
1350
1351         // The return type must match the first argument type
1352         require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
1353
1354         // This counts how many levels of pointer indirection there are
1355         fn ptr_count(t: Ty<'_>) -> usize {
1356             match t.kind() {
1357                 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1358                 _ => 0,
1359             }
1360         }
1361
1362         // Non-ptr type
1363         fn non_ptr(t: Ty<'_>) -> Ty<'_> {
1364             match t.kind() {
1365                 ty::RawPtr(p) => non_ptr(p.ty),
1366                 _ => t,
1367             }
1368         }
1369
1370         // The second argument must be a simd vector with an element type that's a pointer
1371         // to the element type of the first argument
1372         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
1373         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
1374         let (pointer_count, underlying_ty) = match element_ty1.kind() {
1375             ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
1376             _ => {
1377                 require!(
1378                     false,
1379                     "expected element type `{}` of second argument `{}` \
1380                         to be a pointer to the element type `{}` of the first \
1381                         argument `{}`, found `{}` != `*_ {}`",
1382                     element_ty1,
1383                     arg_tys[1],
1384                     in_elem,
1385                     in_ty,
1386                     element_ty1,
1387                     in_elem
1388                 );
1389                 unreachable!();
1390             }
1391         };
1392         assert!(pointer_count > 0);
1393         assert_eq!(pointer_count - 1, ptr_count(element_ty0));
1394         assert_eq!(underlying_ty, non_ptr(element_ty0));
1395
1396         // The element type of the third argument must be a signed integer type of any width:
1397         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
1398         match element_ty2.kind() {
1399             ty::Int(_) => (),
1400             _ => {
1401                 require!(
1402                     false,
1403                     "expected element type `{}` of third argument `{}` \
1404                                  to be a signed integer type",
1405                     element_ty2,
1406                     arg_tys[2]
1407                 );
1408             }
1409         }
1410
1411         // Alignment of T, must be a constant integer value:
1412         let alignment_ty = bx.type_i32();
1413         let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1414
1415         // Truncate the mask vector to a vector of i1s:
1416         let (mask, mask_ty) = {
1417             let i1 = bx.type_i1();
1418             let i1xn = bx.type_vector(i1, in_len);
1419             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1420         };
1421
1422         // Type of the vector of pointers:
1423         let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1424         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
1425
1426         // Type of the vector of elements:
1427         let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1428         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
1429
1430         let llvm_intrinsic =
1431             format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1432         let fn_ty = bx.type_func(
1433             &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
1434             llvm_elem_vec_ty,
1435         );
1436         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1437         let v = bx.call(
1438             fn_ty,
1439             None,
1440             f,
1441             &[args[1].immediate(), alignment, mask, args[0].immediate()],
1442             None,
1443         );
1444         return Ok(v);
1445     }
1446
1447     if name == sym::simd_scatter {
1448         // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1449         //             mask: <N x i{M}>) -> ()
1450         // * N: number of elements in the input vectors
1451         // * T: type of the element to store
1452         // * M: any integer width is supported, will be truncated to i1
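             //
             // For example (a sketch): scattering four `f32` lanes through a vector of
             // `*mut f32` lowers to `llvm.masked.scatter.v4f32.v4p0f32`, again with the
             // mask truncated to `<4 x i1>`.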
1453
1454         // All types must be simd vector types
1455         require_simd!(in_ty, "first");
1456         require_simd!(arg_tys[1], "second");
1457         require_simd!(arg_tys[2], "third");
1458
1459         // Of the same length:
1460         let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
1461         let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
1462         require!(
1463             in_len == element_len1,
1464             "expected {} argument with length {} (same as input type `{}`), \
1465             found `{}` with length {}",
1466             "second",
1467             in_len,
1468             in_ty,
1469             arg_tys[1],
1470             element_len1
1471         );
1472         require!(
1473             in_len == element_len2,
1474             "expected {} argument with length {} (same as input type `{}`), \
1475             found `{}` with length {}",
1476             "third",
1477             in_len,
1478             in_ty,
1479             arg_tys[2],
1480             element_len2
1481         );
1482
1483         // This counts how many levels of pointer indirection there are
1484         fn ptr_count(t: Ty<'_>) -> usize {
1485             match t.kind() {
1486                 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1487                 _ => 0,
1488             }
1489         }
1490
1491         // Non-ptr type
1492         fn non_ptr(t: Ty<'_>) -> Ty<'_> {
1493             match t.kind() {
1494                 ty::RawPtr(p) => non_ptr(p.ty),
1495                 _ => t,
1496             }
1497         }
1498
1499         // The second argument must be a simd vector with an element type that's a pointer
1500         // to the element type of the first argument
1501         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
1502         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
1503         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
1504         let (pointer_count, underlying_ty) = match element_ty1.kind() {
1505             ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
1506                 (ptr_count(element_ty1), non_ptr(element_ty1))
1507             }
1508             _ => {
1509                 require!(
1510                     false,
1511                     "expected element type `{}` of second argument `{}` \
1512                         to be a pointer to the element type `{}` of the first \
1513                         argument `{}`, found `{}` != `*mut {}`",
1514                     element_ty1,
1515                     arg_tys[1],
1516                     in_elem,
1517                     in_ty,
1518                     element_ty1,
1519                     in_elem
1520                 );
1521                 unreachable!();
1522             }
1523         };
1524         assert!(pointer_count > 0);
1525         assert_eq!(pointer_count - 1, ptr_count(element_ty0));
1526         assert_eq!(underlying_ty, non_ptr(element_ty0));
1527
1528         // The element type of the third argument must be a signed integer type of any width:
1529         match element_ty2.kind() {
1530             ty::Int(_) => (),
1531             _ => {
1532                 require!(
1533                     false,
1534                     "expected element type `{}` of third argument `{}` \
1535                          to be a signed integer type",
1536                     element_ty2,
1537                     arg_tys[2]
1538                 );
1539             }
1540         }
1541
1542         // Alignment of T, must be a constant integer value:
1543         let alignment_ty = bx.type_i32();
1544         let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1545
1546         // Truncate the mask vector to a vector of i1s:
1547         let (mask, mask_ty) = {
1548             let i1 = bx.type_i1();
1549             let i1xn = bx.type_vector(i1, in_len);
1550             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1551         };
1552
1553         let ret_t = bx.type_void();
1554
1555         // Type of the vector of pointers:
1556         let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1557         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
1558
1559         // Type of the vector of elements:
1560         let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1561         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
1562
1563         let llvm_intrinsic =
1564             format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1565         let fn_ty =
1566             bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
1567         let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
1568         let v = bx.call(
1569             fn_ty,
1570             None,
1571             f,
1572             &[args[0].immediate(), args[1].immediate(), alignment, mask],
1573             None,
1574         );
1575         return Ok(v);
1576     }
1577
1578     macro_rules! arith_red {
1579         ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
1580          $identity:expr) => {
1581             if name == sym::$name {
1582                 require!(
1583                     ret_ty == in_elem,
1584                     "expected return type `{}` (element of input `{}`), found `{}`",
1585                     in_elem,
1586                     in_ty,
1587                     ret_ty
1588                 );
1589                 return match in_elem.kind() {
1590                     ty::Int(_) | ty::Uint(_) => {
1591                         let r = bx.$integer_reduce(args[0].immediate());
1592                         if $ordered {
1593                             // if overflow occurs, the result is the
1594                             // mathematical result modulo 2^n:
1595                             Ok(bx.$op(args[1].immediate(), r))
1596                         } else {
1597                             Ok(bx.$integer_reduce(args[0].immediate()))
1598                         }
1599                     }
1600                     ty::Float(f) => {
1601                         let acc = if $ordered {
1602                             // ordered arithmetic reductions take an accumulator
1603                             args[1].immediate()
1604                         } else {
1605                             // unordered arithmetic reductions use the identity accumulator
1606                             match f.bit_width() {
1607                                 32 => bx.const_real(bx.type_f32(), $identity),
1608                                 64 => bx.const_real(bx.type_f64(), $identity),
1609                                 v => return_error!(
1610                                     "unsupported {} from `{}` with element `{}` \
1611                                      of size `{}` to `{}`",
1612                                     sym::$name,
1613                                     in_ty,
1614                                     in_elem,
1615                                     v,
1616                                     ret_ty
1617                                 ),
1618                             }
1619                         };
1620                         Ok(bx.$float_reduce(acc, args[0].immediate()))
1621                     }
1622                     _ => return_error!(
1623                         "unsupported {} from `{}` with element `{}` to `{}`",
1624                         sym::$name,
1625                         in_ty,
1626                         in_elem,
1627                         ret_ty
1628                     ),
1629                 };
1630             }
1631         };
1632     }
1633
1634     arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
1635     arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
1636     arith_red!(
1637         simd_reduce_add_unordered: vector_reduce_add,
1638         vector_reduce_fadd_fast,
1639         false,
1640         add,
1641         0.0
1642     );
1643     arith_red!(
1644         simd_reduce_mul_unordered: vector_reduce_mul,
1645         vector_reduce_fmul_fast,
1646         false,
1647         mul,
1648         1.0
1649     );
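     // A small worked example of the distinction above: for an input vector `v` of four
     // `f32` lanes and an accumulator `acc`, `simd_reduce_add_ordered(v, acc)` computes
     // `acc + v[0] + v[1] + v[2] + v[3]` in order, while the unordered form starts from
     // the identity (0.0 for add, 1.0 for mul) and may reassociate (fast-math).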
1650
1651     macro_rules! minmax_red {
1652         ($name:ident: $int_red:ident, $float_red:ident) => {
1653             if name == sym::$name {
1654                 require!(
1655                     ret_ty == in_elem,
1656                     "expected return type `{}` (element of input `{}`), found `{}`",
1657                     in_elem,
1658                     in_ty,
1659                     ret_ty
1660                 );
1661                 return match in_elem.kind() {
1662                     ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
1663                     ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
1664                     ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
1665                     _ => return_error!(
1666                         "unsupported {} from `{}` with element `{}` to `{}`",
1667                         sym::$name,
1668                         in_ty,
1669                         in_elem,
1670                         ret_ty
1671                     ),
1672                 };
1673             }
1674         };
1675     }
1676
1677     minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
1678     minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
1679
1680     minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
1681     minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
1682
1683     macro_rules! bitwise_red {
1684         ($name:ident : $red:ident, $boolean:expr) => {
1685             if name == sym::$name {
1686                 let input = if !$boolean {
1687                     require!(
1688                         ret_ty == in_elem,
1689                         "expected return type `{}` (element of input `{}`), found `{}`",
1690                         in_elem,
1691                         in_ty,
1692                         ret_ty
1693                     );
1694                     args[0].immediate()
1695                 } else {
1696                     match in_elem.kind() {
1697                         ty::Int(_) | ty::Uint(_) => {}
1698                         _ => return_error!(
1699                             "unsupported {} from `{}` with element `{}` to `{}`",
1700                             sym::$name,
1701                             in_ty,
1702                             in_elem,
1703                             ret_ty
1704                         ),
1705                     }
1706
1707                     // boolean reductions operate on vectors of i1s:
1708                     let i1 = bx.type_i1();
1709                     let i1xn = bx.type_vector(i1, in_len as u64);
1710                     bx.trunc(args[0].immediate(), i1xn)
1711                 };
1712                 return match in_elem.kind() {
1713                     ty::Int(_) | ty::Uint(_) => {
1714                         let r = bx.$red(input);
1715                         Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
1716                     }
1717                     _ => return_error!(
1718                         "unsupported {} from `{}` with element `{}` to `{}`",
1719                         sym::$name,
1720                         in_ty,
1721                         in_elem,
1722                         ret_ty
1723                     ),
1724                 };
1725             }
1726         };
1727     }
1728
1729     bitwise_red!(simd_reduce_and: vector_reduce_and, false);
1730     bitwise_red!(simd_reduce_or: vector_reduce_or, false);
1731     bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
1732     bitwise_red!(simd_reduce_all: vector_reduce_and, true);
1733     bitwise_red!(simd_reduce_any: vector_reduce_or, true);
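     // For the boolean forms: e.g. `simd_reduce_all` truncates each lane to `i1`,
     // reduces the lanes with `and`, and zero-extends the single `i1` result back to a
     // bool, whereas `simd_reduce_and`/`or`/`xor` reduce the integer lanes directly.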
1734
1735     if name == sym::simd_cast_ptr {
1736         require_simd!(ret_ty, "return");
1737         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
1738         require!(
1739             in_len == out_len,
1740             "expected return type with length {} (same as input type `{}`), \
1741                   found `{}` with length {}",
1742             in_len,
1743             in_ty,
1744             ret_ty,
1745             out_len
1746         );
1747
1748         match in_elem.kind() {
1749             ty::RawPtr(p) => {
1750                 let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
1751                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
1752                 });
1753                 assert!(!check_sized); // we are in codegen, so we shouldn't see these types
1754                 require!(metadata.is_unit(), "cannot cast fat pointer `{}`", in_elem)
1755             }
1756             _ => return_error!("expected pointer, got `{}`", in_elem),
1757         }
1758         match out_elem.kind() {
1759             ty::RawPtr(p) => {
1760                 let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
1761                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
1762                 });
1763                 assert!(!check_sized); // we are in codegen, so we shouldn't see these types
1764                 require!(metadata.is_unit(), "cannot cast to fat pointer `{}`", out_elem)
1765             }
1766             _ => return_error!("expected pointer, got `{}`", out_elem),
1767         }
1768
1769         if in_elem == out_elem {
1770             return Ok(args[0].immediate());
1771         } else {
1772             return Ok(bx.pointercast(args[0].immediate(), llret_ty));
1773         }
1774     }
1775
1776     if name == sym::simd_expose_addr {
1777         require_simd!(ret_ty, "return");
1778         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
1779         require!(
1780             in_len == out_len,
1781             "expected return type with length {} (same as input type `{}`), \
1782                   found `{}` with length {}",
1783             in_len,
1784             in_ty,
1785             ret_ty,
1786             out_len
1787         );
1788
1789         match in_elem.kind() {
1790             ty::RawPtr(_) => {}
1791             _ => return_error!("expected pointer, got `{}`", in_elem),
1792         }
1793         match out_elem.kind() {
1794             ty::Uint(ty::UintTy::Usize) => {}
1795             _ => return_error!("expected `usize`, got `{}`", out_elem),
1796         }
1797
1798         return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
1799     }
1800
1801     if name == sym::simd_from_exposed_addr {
1802         require_simd!(ret_ty, "return");
1803         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
1804         require!(
1805             in_len == out_len,
1806             "expected return type with length {} (same as input type `{}`), \
1807                   found `{}` with length {}",
1808             in_len,
1809             in_ty,
1810             ret_ty,
1811             out_len
1812         );
1813
1814         match in_elem.kind() {
1815             ty::Uint(ty::UintTy::Usize) => {}
1816             _ => return_error!("expected `usize`, got `{}`", in_elem),
1817         }
1818         match out_elem.kind() {
1819             ty::RawPtr(_) => {}
1820             _ => return_error!("expected pointer, got `{}`", out_elem),
1821         }
1822
1823         return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
1824     }
1825
1826     if name == sym::simd_cast || name == sym::simd_as {
1827         require_simd!(ret_ty, "return");
1828         let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
1829         require!(
1830             in_len == out_len,
1831             "expected return type with length {} (same as input type `{}`), \
1832                   found `{}` with length {}",
1833             in_len,
1834             in_ty,
1835             ret_ty,
1836             out_len
1837         );
1838         // casting cares about nominal type, not just structural type
1839         if in_elem == out_elem {
1840             return Ok(args[0].immediate());
1841         }
1842
1843         enum Style {
1844             Float,
1845             Int(/* is signed? */ bool),
1846             Unsupported,
1847         }
1848
1849         let (in_style, in_width) = match in_elem.kind() {
1850             // `isize`/`usize` are normalized to their fixed-width equivalents
1851             // just below, so this `bit_width()` unwrap cannot fail.
1852             ty::Int(i) => (
1853                 Style::Int(true),
1854                 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
1855             ),
1856             ty::Uint(u) => (
1857                 Style::Int(false),
1858                 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
1859             ),
1860             ty::Float(f) => (Style::Float, f.bit_width()),
1861             _ => (Style::Unsupported, 0),
1862         };
1863         let (out_style, out_width) = match out_elem.kind() {
1864             ty::Int(i) => (
1865                 Style::Int(true),
1866                 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
1867             ),
1868             ty::Uint(u) => (
1869                 Style::Int(false),
1870                 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
1871             ),
1872             ty::Float(f) => (Style::Float, f.bit_width()),
1873             _ => (Style::Unsupported, 0),
1874         };
1875
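             // A few illustrative combinations (a sketch): `i8` -> `i32` sign-extends,
             // `u8` -> `i32` zero-extends, `f64` -> `f32` truncates, and a float-to-int
             // cast goes through `cast_float_to_int` when the intrinsic is `simd_as`,
             // matching the saturating semantics of `as`.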
1876         match (in_style, out_style) {
1877             (Style::Int(in_is_signed), Style::Int(_)) => {
1878                 return Ok(match in_width.cmp(&out_width) {
1879                     Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
1880                     Ordering::Equal => args[0].immediate(),
1881                     Ordering::Less => {
1882                         if in_is_signed {
1883                             bx.sext(args[0].immediate(), llret_ty)
1884                         } else {
1885                             bx.zext(args[0].immediate(), llret_ty)
1886                         }
1887                     }
1888                 });
1889             }
1890             (Style::Int(in_is_signed), Style::Float) => {
1891                 return Ok(if in_is_signed {
1892                     bx.sitofp(args[0].immediate(), llret_ty)
1893                 } else {
1894                     bx.uitofp(args[0].immediate(), llret_ty)
1895                 });
1896             }
1897             (Style::Float, Style::Int(out_is_signed)) => {
1898                 return Ok(match (out_is_signed, name == sym::simd_as) {
1899                     (false, false) => bx.fptoui(args[0].immediate(), llret_ty),
1900                     (true, false) => bx.fptosi(args[0].immediate(), llret_ty),
1901                     (_, true) => bx.cast_float_to_int(out_is_signed, args[0].immediate(), llret_ty),
1902                 });
1903             }
1904             (Style::Float, Style::Float) => {
1905                 return Ok(match in_width.cmp(&out_width) {
1906                     Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
1907                     Ordering::Equal => args[0].immediate(),
1908                     Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
1909                 });
1910             }
1911             _ => { /* Unsupported. Fallthrough. */ }
1912         }
1913         require!(
1914             false,
1915             "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1916             in_ty,
1917             in_elem,
1918             ret_ty,
1919             out_elem
1920         );
1921     }
1922     macro_rules! arith_binary {
1923         ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
1924             $(if name == sym::$name {
1925                 match in_elem.kind() {
1926                     $($(ty::$p(_))|* => {
1927                         return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
1928                     })*
1929                     _ => {},
1930                 }
1931                 require!(false,
1932                          "unsupported operation on `{}` with element `{}`",
1933                          in_ty,
1934                          in_elem)
1935             })*
1936         }
1937     }
1938     arith_binary! {
1939         simd_add: Uint, Int => add, Float => fadd;
1940         simd_sub: Uint, Int => sub, Float => fsub;
1941         simd_mul: Uint, Int => mul, Float => fmul;
1942         simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
1943         simd_rem: Uint => urem, Int => srem, Float => frem;
1944         simd_shl: Uint, Int => shl;
1945         simd_shr: Uint => lshr, Int => ashr;
1946         simd_and: Uint, Int => and;
1947         simd_or: Uint, Int => or;
1948         simd_xor: Uint, Int => xor;
1949         simd_fmax: Float => maxnum;
1950         simd_fmin: Float => minnum;
1952     }
1953     macro_rules! arith_unary {
1954         ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
1955             $(if name == sym::$name {
1956                 match in_elem.kind() {
1957                     $($(ty::$p(_))|* => {
1958                         return Ok(bx.$call(args[0].immediate()))
1959                     })*
1960                     _ => {},
1961                 }
1962                 require!(false,
1963                          "unsupported operation on `{}` with element `{}`",
1964                          in_ty,
1965                          in_elem)
1966             })*
1967         }
1968     }
1969     arith_unary! {
1970         simd_neg: Int => neg, Float => fneg;
1971     }
1972
1973     if name == sym::simd_arith_offset {
1974         // This also checks that the first operand is a ptr type.
1975         let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
1976             span_bug!(span, "must be called with a vector of pointer types as first argument")
1977         });
1978         let layout = bx.layout_of(pointee.ty);
1979         let ptrs = args[0].immediate();
1980         // The second argument must be a ptr-sized integer.
1981         // (We don't care about the signedness, this is wrapping anyway.)
1982         let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
1983         if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
1984             span_bug!(
1985                 span,
1986                 "must be called with a vector of pointer-sized integers as second argument"
1987             );
1988         }
1989         let offsets = args[1].immediate();
1990
1991         return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
1992     }
1993
1994     if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
1995         let lhs = args[0].immediate();
1996         let rhs = args[1].immediate();
1997         let is_add = name == sym::simd_saturating_add;
1998         let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
1999         let (signed, elem_width, elem_ty) = match *in_elem.kind() {
2000             ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
2001             ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
2002             _ => {
2003                 return_error!(
2004                     "expected element type `{}` of vector type `{}` \
2005                      to be a signed or unsigned integer type",
2006                     arg_tys[0].simd_size_and_type(bx.tcx()).1,
2007                     arg_tys[0]
2008                 );
2009             }
2010         };
2011         let llvm_intrinsic = &format!(
2012             "llvm.{}{}.sat.v{}i{}",
2013             if signed { 's' } else { 'u' },
2014             if is_add { "add" } else { "sub" },
2015             in_len,
2016             elem_width
2017         );
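             // For example, a saturating signed add over four `i32` lanes produces the
             // name `llvm.sadd.sat.v4i32`; the unsigned subtraction variant would be
             // `llvm.usub.sat.v4i32`.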
2018         let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2019
2020         let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
2021         let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
2022         let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
2023         return Ok(v);
2024     }
2025
2026     span_bug!(span, "unknown SIMD intrinsic");
2027 }
2028
2029 // Returns the width of an integer `Ty`, and whether it is signed.
2030 // Returns `None` if the type is not an integer.
2031 // FIXME: there are several functions like this one; investigate reusing some of the
2032 // already existing helpers.
2033 fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
2034     match ty.kind() {
2035         ty::Int(t) => {
2036             Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
2037         }
2038         ty::Uint(t) => {
2039             Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
2040         }
2041         _ => None,
2042     }
2043 }