]> git.lizzy.rs Git - rust.git/blob - src/librustc_codegen_llvm/intrinsic.rs
Auto merge of #74737 - smmalis37:astconv-factor, r=davidtwco
[rust.git] / src / librustc_codegen_llvm / intrinsic.rs
1 use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
2 use crate::builder::Builder;
3 use crate::context::CodegenCx;
4 use crate::llvm;
5 use crate::type_::Type;
6 use crate::type_of::LayoutLlvmExt;
7 use crate::va_arg::emit_va_arg;
8 use crate::value::Value;
9
10 use log::debug;
11
12 use rustc_ast::ast;
13 use rustc_codegen_ssa::base::{compare_simd_types, to_immediate, wants_msvc_seh};
14 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
15 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
16 use rustc_codegen_ssa::coverageinfo::CounterOp;
17 use rustc_codegen_ssa::glue;
18 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
19 use rustc_codegen_ssa::mir::place::PlaceRef;
20 use rustc_codegen_ssa::traits::*;
21 use rustc_codegen_ssa::MemFlags;
22 use rustc_hir as hir;
23 use rustc_middle::mir::coverage;
24 use rustc_middle::mir::Operand;
25 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
26 use rustc_middle::ty::{self, Ty};
27 use rustc_middle::{bug, span_bug};
28 use rustc_span::{sym, symbol::kw, Span, Symbol};
29 use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};
30 use rustc_target::spec::PanicStrategy;
31
32 use std::cmp::Ordering;
33 use std::iter;
34
35 fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
36     let llvm_name = match name {
37         sym::sqrtf32 => "llvm.sqrt.f32",
38         sym::sqrtf64 => "llvm.sqrt.f64",
39         sym::powif32 => "llvm.powi.f32",
40         sym::powif64 => "llvm.powi.f64",
41         sym::sinf32 => "llvm.sin.f32",
42         sym::sinf64 => "llvm.sin.f64",
43         sym::cosf32 => "llvm.cos.f32",
44         sym::cosf64 => "llvm.cos.f64",
45         sym::powf32 => "llvm.pow.f32",
46         sym::powf64 => "llvm.pow.f64",
47         sym::expf32 => "llvm.exp.f32",
48         sym::expf64 => "llvm.exp.f64",
49         sym::exp2f32 => "llvm.exp2.f32",
50         sym::exp2f64 => "llvm.exp2.f64",
51         sym::logf32 => "llvm.log.f32",
52         sym::logf64 => "llvm.log.f64",
53         sym::log10f32 => "llvm.log10.f32",
54         sym::log10f64 => "llvm.log10.f64",
55         sym::log2f32 => "llvm.log2.f32",
56         sym::log2f64 => "llvm.log2.f64",
57         sym::fmaf32 => "llvm.fma.f32",
58         sym::fmaf64 => "llvm.fma.f64",
59         sym::fabsf32 => "llvm.fabs.f32",
60         sym::fabsf64 => "llvm.fabs.f64",
61         sym::minnumf32 => "llvm.minnum.f32",
62         sym::minnumf64 => "llvm.minnum.f64",
63         sym::maxnumf32 => "llvm.maxnum.f32",
64         sym::maxnumf64 => "llvm.maxnum.f64",
65         sym::copysignf32 => "llvm.copysign.f32",
66         sym::copysignf64 => "llvm.copysign.f64",
67         sym::floorf32 => "llvm.floor.f32",
68         sym::floorf64 => "llvm.floor.f64",
69         sym::ceilf32 => "llvm.ceil.f32",
70         sym::ceilf64 => "llvm.ceil.f64",
71         sym::truncf32 => "llvm.trunc.f32",
72         sym::truncf64 => "llvm.trunc.f64",
73         sym::rintf32 => "llvm.rint.f32",
74         sym::rintf64 => "llvm.rint.f64",
75         sym::nearbyintf32 => "llvm.nearbyint.f32",
76         sym::nearbyintf64 => "llvm.nearbyint.f64",
77         sym::roundf32 => "llvm.round.f32",
78         sym::roundf64 => "llvm.round.f64",
79         sym::assume => "llvm.assume",
80         sym::abort => "llvm.trap",
81         _ => return None,
82     };
83     Some(cx.get_intrinsic(&llvm_name))
84 }
85
86 impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
87     fn is_codegen_intrinsic(
88         &mut self,
89         intrinsic: Symbol,
90         args: &Vec<Operand<'tcx>>,
91         caller_instance: ty::Instance<'tcx>,
92     ) -> bool {
93         if self.tcx.sess.opts.debugging_opts.instrument_coverage {
94             // Add the coverage information from the MIR to the Codegen context. Some coverage
95             // intrinsics are used only to pass along the coverage information (returns `false`
96             // for `is_codegen_intrinsic()`), but `count_code_region` is also converted into an
97             // LLVM intrinsic to increment a coverage counter.
98             match intrinsic {
99                 sym::count_code_region => {
100                     use coverage::count_code_region_args::*;
101                     self.add_counter_region(
102                         caller_instance,
103                         op_to_u64(&args[FUNCTION_SOURCE_HASH]),
104                         op_to_u32(&args[COUNTER_INDEX]),
105                         op_to_u32(&args[START_BYTE_POS]),
106                         op_to_u32(&args[END_BYTE_POS]),
107                     );
108                     return true; // Also inject the counter increment in the backend
109                 }
110                 sym::coverage_counter_add | sym::coverage_counter_subtract => {
111                     use coverage::coverage_counter_expression_args::*;
112                     self.add_counter_expression_region(
113                         caller_instance,
114                         op_to_u32(&args[COUNTER_EXPRESSION_INDEX]),
115                         op_to_u32(&args[LEFT_INDEX]),
116                         if intrinsic == sym::coverage_counter_add {
117                             CounterOp::Add
118                         } else {
119                             CounterOp::Subtract
120                         },
121                         op_to_u32(&args[RIGHT_INDEX]),
122                         op_to_u32(&args[START_BYTE_POS]),
123                         op_to_u32(&args[END_BYTE_POS]),
124                     );
125                     return false; // Does not inject backend code
126                 }
127                 sym::coverage_unreachable => {
128                     use coverage::coverage_unreachable_args::*;
129                     self.add_unreachable_region(
130                         caller_instance,
131                         op_to_u32(&args[START_BYTE_POS]),
132                         op_to_u32(&args[END_BYTE_POS]),
133                     );
134                     return false; // Does not inject backend code
135                 }
136                 _ => {}
137             }
138         } else {
139             // NOT self.tcx.sess.opts.debugging_opts.instrument_coverage
140             if intrinsic == sym::count_code_region {
141                 // An external crate may have been pre-compiled with coverage instrumentation, and
142                 // some references from the current crate to the external crate might carry along
143                 // the call terminators to coverage intrinsics, like `count_code_region` (for
144                 // example, when instantiating a generic function). If the current crate has
145                 // `instrument_coverage` disabled, the `count_code_region` call terminators should
146                 // be ignored.
147                 return false; // Do not inject coverage counters inlined from external crates
148             }
149         }
150         true // Unhandled intrinsics should be passed to `codegen_intrinsic_call()`
151     }
152
153     fn codegen_intrinsic_call(
154         &mut self,
155         instance: ty::Instance<'tcx>,
156         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
157         args: &[OperandRef<'tcx, &'ll Value>],
158         llresult: &'ll Value,
159         span: Span,
160         caller_instance: ty::Instance<'tcx>,
161     ) {
162         let tcx = self.tcx;
163         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
164
165         let (def_id, substs) = match callee_ty.kind {
166             ty::FnDef(def_id, substs) => (def_id, substs),
167             _ => bug!("expected fn item type, found {}", callee_ty),
168         };
169
170         let sig = callee_ty.fn_sig(tcx);
171         let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
172         let arg_tys = sig.inputs();
173         let ret_ty = sig.output();
174         let name = tcx.item_name(def_id);
175         let name_str = &*name.as_str();
176
177         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
178         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
179
180         let simple = get_simple_intrinsic(self, name);
181         let llval = match name {
182             _ if simple.is_some() => self.call(
183                 simple.unwrap(),
184                 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
185                 None,
186             ),
187             sym::unreachable => {
188                 return;
189             }
190             sym::likely => {
191                 let expect = self.get_intrinsic(&("llvm.expect.i1"));
192                 self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
193             }
194             sym::unlikely => {
195                 let expect = self.get_intrinsic(&("llvm.expect.i1"));
196                 self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
197             }
198             kw::Try => {
199                 try_intrinsic(
200                     self,
201                     args[0].immediate(),
202                     args[1].immediate(),
203                     args[2].immediate(),
204                     llresult,
205                 );
206                 return;
207             }
208             sym::breakpoint => {
209                 let llfn = self.get_intrinsic(&("llvm.debugtrap"));
210                 self.call(llfn, &[], None)
211             }
212             sym::count_code_region => {
213                 // FIXME(richkadel): The current implementation assumes the MIR for the given
214                 // caller_instance represents a single function. Validate and/or correct if inlining
215                 // and/or monomorphization invalidates these assumptions.
216                 let coverageinfo = tcx.coverageinfo(caller_instance.def_id());
217                 let mangled_fn = tcx.symbol_name(caller_instance);
218                 let (mangled_fn_name, _len_val) = self.const_str(Symbol::intern(mangled_fn.name));
219                 let num_counters = self.const_u32(coverageinfo.num_counters);
220                 use coverage::count_code_region_args::*;
221                 let hash = args[FUNCTION_SOURCE_HASH].immediate();
222                 let index = args[COUNTER_INDEX].immediate();
223                 debug!(
224                     "translating Rust intrinsic `count_code_region()` to LLVM intrinsic: \
225                     instrprof.increment(fn_name={}, hash={:?}, num_counters={:?}, index={:?})",
226                     mangled_fn.name, hash, num_counters, index,
227                 );
228                 self.instrprof_increment(mangled_fn_name, hash, num_counters, index)
229             }
230             sym::va_start => self.va_start(args[0].immediate()),
231             sym::va_end => self.va_end(args[0].immediate()),
232             sym::va_copy => {
233                 let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
234                 self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
235             }
236             sym::va_arg => {
237                 match fn_abi.ret.layout.abi {
238                     abi::Abi::Scalar(ref scalar) => {
239                         match scalar.value {
240                             Primitive::Int(..) => {
241                                 if self.cx().size_of(ret_ty).bytes() < 4 {
242                                     // `va_arg` should not be called on a integer type
243                                     // less than 4 bytes in length. If it is, promote
244                                     // the integer to a `i32` and truncate the result
245                                     // back to the smaller type.
246                                     let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
247                                     self.trunc(promoted_result, llret_ty)
248                                 } else {
249                                     emit_va_arg(self, args[0], ret_ty)
250                                 }
251                             }
252                             Primitive::F64 | Primitive::Pointer => {
253                                 emit_va_arg(self, args[0], ret_ty)
254                             }
255                             // `va_arg` should never be used with the return type f32.
256                             Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
257                         }
258                     }
259                     _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
260                 }
261             }
262             sym::size_of_val => {
263                 let tp_ty = substs.type_at(0);
264                 if let OperandValue::Pair(_, meta) = args[0].val {
265                     let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
266                     llsize
267                 } else {
268                     self.const_usize(self.size_of(tp_ty).bytes())
269                 }
270             }
271             sym::min_align_of_val => {
272                 let tp_ty = substs.type_at(0);
273                 if let OperandValue::Pair(_, meta) = args[0].val {
274                     let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
275                     llalign
276                 } else {
277                     self.const_usize(self.align_of(tp_ty).bytes())
278                 }
279             }
280             sym::size_of
281             | sym::pref_align_of
282             | sym::min_align_of
283             | sym::needs_drop
284             | sym::type_id
285             | sym::type_name
286             | sym::variant_count => {
287                 let value = self
288                     .tcx
289                     .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
290                     .unwrap();
291                 OperandRef::from_const(self, value, ret_ty).immediate_or_packed_pair(self)
292             }
293             // Effectively no-op
294             sym::forget => {
295                 return;
296             }
297             sym::offset => {
298                 let ptr = args[0].immediate();
299                 let offset = args[1].immediate();
300                 self.inbounds_gep(ptr, &[offset])
301             }
302             sym::arith_offset => {
303                 let ptr = args[0].immediate();
304                 let offset = args[1].immediate();
305                 self.gep(ptr, &[offset])
306             }
307
308             sym::copy_nonoverlapping => {
309                 copy_intrinsic(
310                     self,
311                     false,
312                     false,
313                     substs.type_at(0),
314                     args[1].immediate(),
315                     args[0].immediate(),
316                     args[2].immediate(),
317                 );
318                 return;
319             }
320             sym::copy => {
321                 copy_intrinsic(
322                     self,
323                     true,
324                     false,
325                     substs.type_at(0),
326                     args[1].immediate(),
327                     args[0].immediate(),
328                     args[2].immediate(),
329                 );
330                 return;
331             }
332             sym::write_bytes => {
333                 memset_intrinsic(
334                     self,
335                     false,
336                     substs.type_at(0),
337                     args[0].immediate(),
338                     args[1].immediate(),
339                     args[2].immediate(),
340                 );
341                 return;
342             }
343
344             sym::volatile_copy_nonoverlapping_memory => {
345                 copy_intrinsic(
346                     self,
347                     false,
348                     true,
349                     substs.type_at(0),
350                     args[0].immediate(),
351                     args[1].immediate(),
352                     args[2].immediate(),
353                 );
354                 return;
355             }
356             sym::volatile_copy_memory => {
357                 copy_intrinsic(
358                     self,
359                     true,
360                     true,
361                     substs.type_at(0),
362                     args[0].immediate(),
363                     args[1].immediate(),
364                     args[2].immediate(),
365                 );
366                 return;
367             }
368             sym::volatile_set_memory => {
369                 memset_intrinsic(
370                     self,
371                     true,
372                     substs.type_at(0),
373                     args[0].immediate(),
374                     args[1].immediate(),
375                     args[2].immediate(),
376                 );
377                 return;
378             }
379             sym::volatile_load | sym::unaligned_volatile_load => {
380                 let tp_ty = substs.type_at(0);
381                 let mut ptr = args[0].immediate();
382                 if let PassMode::Cast(ty) = fn_abi.ret.mode {
383                     ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
384                 }
385                 let load = self.volatile_load(ptr);
386                 let align = if name == sym::unaligned_volatile_load {
387                     1
388                 } else {
389                     self.align_of(tp_ty).bytes() as u32
390                 };
391                 unsafe {
392                     llvm::LLVMSetAlignment(load, align);
393                 }
394                 to_immediate(self, load, self.layout_of(tp_ty))
395             }
396             sym::volatile_store => {
397                 let dst = args[0].deref(self.cx());
398                 args[1].val.volatile_store(self, dst);
399                 return;
400             }
401             sym::unaligned_volatile_store => {
402                 let dst = args[0].deref(self.cx());
403                 args[1].val.unaligned_volatile_store(self, dst);
404                 return;
405             }
406             sym::prefetch_read_data
407             | sym::prefetch_write_data
408             | sym::prefetch_read_instruction
409             | sym::prefetch_write_instruction => {
410                 let expect = self.get_intrinsic(&("llvm.prefetch"));
411                 let (rw, cache_type) = match name {
412                     sym::prefetch_read_data => (0, 1),
413                     sym::prefetch_write_data => (1, 1),
414                     sym::prefetch_read_instruction => (0, 0),
415                     sym::prefetch_write_instruction => (1, 0),
416                     _ => bug!(),
417                 };
418                 self.call(
419                     expect,
420                     &[
421                         args[0].immediate(),
422                         self.const_i32(rw),
423                         args[1].immediate(),
424                         self.const_i32(cache_type),
425                     ],
426                     None,
427                 )
428             }
429             sym::ctlz
430             | sym::ctlz_nonzero
431             | sym::cttz
432             | sym::cttz_nonzero
433             | sym::ctpop
434             | sym::bswap
435             | sym::bitreverse
436             | sym::add_with_overflow
437             | sym::sub_with_overflow
438             | sym::mul_with_overflow
439             | sym::wrapping_add
440             | sym::wrapping_sub
441             | sym::wrapping_mul
442             | sym::unchecked_div
443             | sym::unchecked_rem
444             | sym::unchecked_shl
445             | sym::unchecked_shr
446             | sym::unchecked_add
447             | sym::unchecked_sub
448             | sym::unchecked_mul
449             | sym::exact_div
450             | sym::rotate_left
451             | sym::rotate_right
452             | sym::saturating_add
453             | sym::saturating_sub => {
454                 let ty = arg_tys[0];
455                 match int_type_width_signed(ty, self) {
456                     Some((width, signed)) => match name {
457                         sym::ctlz | sym::cttz => {
458                             let y = self.const_bool(false);
459                             let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
460                             self.call(llfn, &[args[0].immediate(), y], None)
461                         }
462                         sym::ctlz_nonzero | sym::cttz_nonzero => {
463                             let y = self.const_bool(true);
464                             let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
465                             let llfn = self.get_intrinsic(llvm_name);
466                             self.call(llfn, &[args[0].immediate(), y], None)
467                         }
468                         sym::ctpop => self.call(
469                             self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
470                             &[args[0].immediate()],
471                             None,
472                         ),
473                         sym::bswap => {
474                             if width == 8 {
475                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
476                             } else {
477                                 self.call(
478                                     self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
479                                     &[args[0].immediate()],
480                                     None,
481                                 )
482                             }
483                         }
484                         sym::bitreverse => self.call(
485                             self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
486                             &[args[0].immediate()],
487                             None,
488                         ),
489                         sym::add_with_overflow
490                         | sym::sub_with_overflow
491                         | sym::mul_with_overflow => {
492                             let intrinsic = format!(
493                                 "llvm.{}{}.with.overflow.i{}",
494                                 if signed { 's' } else { 'u' },
495                                 &name_str[..3],
496                                 width
497                             );
498                             let llfn = self.get_intrinsic(&intrinsic);
499
500                             // Convert `i1` to a `bool`, and write it to the out parameter
501                             let pair =
502                                 self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
503                             let val = self.extract_value(pair, 0);
504                             let overflow = self.extract_value(pair, 1);
505                             let overflow = self.zext(overflow, self.type_bool());
506
507                             let dest = result.project_field(self, 0);
508                             self.store(val, dest.llval, dest.align);
509                             let dest = result.project_field(self, 1);
510                             self.store(overflow, dest.llval, dest.align);
511
512                             return;
513                         }
514                         sym::wrapping_add => self.add(args[0].immediate(), args[1].immediate()),
515                         sym::wrapping_sub => self.sub(args[0].immediate(), args[1].immediate()),
516                         sym::wrapping_mul => self.mul(args[0].immediate(), args[1].immediate()),
517                         sym::exact_div => {
518                             if signed {
519                                 self.exactsdiv(args[0].immediate(), args[1].immediate())
520                             } else {
521                                 self.exactudiv(args[0].immediate(), args[1].immediate())
522                             }
523                         }
524                         sym::unchecked_div => {
525                             if signed {
526                                 self.sdiv(args[0].immediate(), args[1].immediate())
527                             } else {
528                                 self.udiv(args[0].immediate(), args[1].immediate())
529                             }
530                         }
531                         sym::unchecked_rem => {
532                             if signed {
533                                 self.srem(args[0].immediate(), args[1].immediate())
534                             } else {
535                                 self.urem(args[0].immediate(), args[1].immediate())
536                             }
537                         }
538                         sym::unchecked_shl => self.shl(args[0].immediate(), args[1].immediate()),
539                         sym::unchecked_shr => {
540                             if signed {
541                                 self.ashr(args[0].immediate(), args[1].immediate())
542                             } else {
543                                 self.lshr(args[0].immediate(), args[1].immediate())
544                             }
545                         }
546                         sym::unchecked_add => {
547                             if signed {
548                                 self.unchecked_sadd(args[0].immediate(), args[1].immediate())
549                             } else {
550                                 self.unchecked_uadd(args[0].immediate(), args[1].immediate())
551                             }
552                         }
553                         sym::unchecked_sub => {
554                             if signed {
555                                 self.unchecked_ssub(args[0].immediate(), args[1].immediate())
556                             } else {
557                                 self.unchecked_usub(args[0].immediate(), args[1].immediate())
558                             }
559                         }
560                         sym::unchecked_mul => {
561                             if signed {
562                                 self.unchecked_smul(args[0].immediate(), args[1].immediate())
563                             } else {
564                                 self.unchecked_umul(args[0].immediate(), args[1].immediate())
565                             }
566                         }
567                         sym::rotate_left | sym::rotate_right => {
568                             let is_left = name == sym::rotate_left;
569                             let val = args[0].immediate();
570                             let raw_shift = args[1].immediate();
571                             // rotate = funnel shift with first two args the same
572                             let llvm_name =
573                                 &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
574                             let llfn = self.get_intrinsic(llvm_name);
575                             self.call(llfn, &[val, val, raw_shift], None)
576                         }
577                         sym::saturating_add | sym::saturating_sub => {
578                             let is_add = name == sym::saturating_add;
579                             let lhs = args[0].immediate();
580                             let rhs = args[1].immediate();
581                             let llvm_name = &format!(
582                                 "llvm.{}{}.sat.i{}",
583                                 if signed { 's' } else { 'u' },
584                                 if is_add { "add" } else { "sub" },
585                                 width
586                             );
587                             let llfn = self.get_intrinsic(llvm_name);
588                             self.call(llfn, &[lhs, rhs], None)
589                         }
590                         _ => bug!(),
591                     },
592                     None => {
593                         span_invalid_monomorphization_error(
594                             tcx.sess,
595                             span,
596                             &format!(
597                                 "invalid monomorphization of `{}` intrinsic: \
598                                       expected basic integer type, found `{}`",
599                                 name, ty
600                             ),
601                         );
602                         return;
603                     }
604                 }
605             }
606             sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
607                 match float_type_width(arg_tys[0]) {
608                     Some(_width) => match name {
609                         sym::fadd_fast => self.fadd_fast(args[0].immediate(), args[1].immediate()),
610                         sym::fsub_fast => self.fsub_fast(args[0].immediate(), args[1].immediate()),
611                         sym::fmul_fast => self.fmul_fast(args[0].immediate(), args[1].immediate()),
612                         sym::fdiv_fast => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
613                         sym::frem_fast => self.frem_fast(args[0].immediate(), args[1].immediate()),
614                         _ => bug!(),
615                     },
616                     None => {
617                         span_invalid_monomorphization_error(
618                             tcx.sess,
619                             span,
620                             &format!(
621                                 "invalid monomorphization of `{}` intrinsic: \
622                                       expected basic float type, found `{}`",
623                                 name, arg_tys[0]
624                             ),
625                         );
626                         return;
627                     }
628                 }
629             }
630
631             sym::float_to_int_unchecked => {
632                 let float_width = match float_type_width(arg_tys[0]) {
633                     Some(width) => width,
634                     None => {
635                         span_invalid_monomorphization_error(
636                             tcx.sess,
637                             span,
638                             &format!(
639                                 "invalid monomorphization of `float_to_int_unchecked` \
640                                   intrinsic: expected basic float type, \
641                                   found `{}`",
642                                 arg_tys[0]
643                             ),
644                         );
645                         return;
646                     }
647                 };
648                 let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
649                     Some(pair) => pair,
650                     None => {
651                         span_invalid_monomorphization_error(
652                             tcx.sess,
653                             span,
654                             &format!(
655                                 "invalid monomorphization of `float_to_int_unchecked` \
656                                       intrinsic:  expected basic integer type, \
657                                       found `{}`",
658                                 ret_ty
659                             ),
660                         );
661                         return;
662                     }
663                 };
664
665                 // The LLVM backend can reorder and speculate `fptosi` and
666                 // `fptoui`, so on WebAssembly the codegen for this instruction
667                 // is quite heavyweight. To avoid this heavyweight codegen we
668                 // instead use the raw wasm intrinsics which will lower to one
669                 // instruction in WebAssembly (`iNN.trunc_fMM_{s,u}`). This one
670                 // instruction will trap if the operand is out of bounds, but
671                 // that's ok since this intrinsic is UB if the operands are out
672                 // of bounds, so the behavior can be different on WebAssembly
673                 // than other targets.
674                 //
675                 // Note, however, that when the `nontrapping-fptoint` feature is
676                 // enabled in LLVM then LLVM will lower `fptosi` to
677                 // `iNN.trunc_sat_fMM_{s,u}`, so if that's the case we don't
678                 // bother with intrinsics.
679                 let mut result = None;
680                 if self.sess().target.target.arch == "wasm32"
681                     && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
682                 {
683                     let name = match (width, float_width, signed) {
684                         (32, 32, true) => Some("llvm.wasm.trunc.signed.i32.f32"),
685                         (32, 64, true) => Some("llvm.wasm.trunc.signed.i32.f64"),
686                         (64, 32, true) => Some("llvm.wasm.trunc.signed.i64.f32"),
687                         (64, 64, true) => Some("llvm.wasm.trunc.signed.i64.f64"),
688                         (32, 32, false) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
689                         (32, 64, false) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
690                         (64, 32, false) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
691                         (64, 64, false) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
692                         _ => None,
693                     };
694                     if let Some(name) = name {
695                         let intrinsic = self.get_intrinsic(name);
696                         result = Some(self.call(intrinsic, &[args[0].immediate()], None));
697                     }
698                 }
699                 result.unwrap_or_else(|| {
700                     if signed {
701                         self.fptosi(args[0].immediate(), self.cx.type_ix(width))
702                     } else {
703                         self.fptoui(args[0].immediate(), self.cx.type_ix(width))
704                     }
705                 })
706             }
707
708             sym::discriminant_value => {
709                 if ret_ty.is_integral() {
710                     args[0].deref(self.cx()).codegen_get_discr(self, ret_ty)
711                 } else {
712                     span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
713                 }
714             }
715
716             _ if name_str.starts_with("simd_") => {
717                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
718                     Ok(llval) => llval,
719                     Err(()) => return,
720                 }
721             }
722             // This requires that atomic intrinsics follow a specific naming pattern:
723             // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
724             name if name_str.starts_with("atomic_") => {
725                 use rustc_codegen_ssa::common::AtomicOrdering::*;
726                 use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};
727
728                 let split: Vec<&str> = name_str.split('_').collect();
729
730                 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
731                 let (order, failorder) = match split.len() {
732                     2 => (SequentiallyConsistent, SequentiallyConsistent),
733                     3 => match split[2] {
734                         "unordered" => (Unordered, Unordered),
735                         "relaxed" => (Monotonic, Monotonic),
736                         "acq" => (Acquire, Acquire),
737                         "rel" => (Release, Monotonic),
738                         "acqrel" => (AcquireRelease, Acquire),
739                         "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
740                         "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
741                         _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
742                     },
743                     4 => match (split[2], split[3]) {
744                         ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
745                         ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
746                         _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
747                     },
748                     _ => self.sess().fatal("Atomic intrinsic not in correct format"),
749                 };
750
751                 let invalid_monomorphization = |ty| {
752                     span_invalid_monomorphization_error(
753                         tcx.sess,
754                         span,
755                         &format!(
756                             "invalid monomorphization of `{}` intrinsic: \
757                                   expected basic integer type, found `{}`",
758                             name, ty
759                         ),
760                     );
761                 };
762
763                 match split[1] {
764                     "cxchg" | "cxchgweak" => {
765                         let ty = substs.type_at(0);
766                         if int_type_width_signed(ty, self).is_some() {
767                             let weak = split[1] == "cxchgweak";
768                             let pair = self.atomic_cmpxchg(
769                                 args[0].immediate(),
770                                 args[1].immediate(),
771                                 args[2].immediate(),
772                                 order,
773                                 failorder,
774                                 weak,
775                             );
776                             let val = self.extract_value(pair, 0);
777                             let success = self.extract_value(pair, 1);
778                             let success = self.zext(success, self.type_bool());
779
780                             let dest = result.project_field(self, 0);
781                             self.store(val, dest.llval, dest.align);
782                             let dest = result.project_field(self, 1);
783                             self.store(success, dest.llval, dest.align);
784                             return;
785                         } else {
786                             return invalid_monomorphization(ty);
787                         }
788                     }
789
790                     "load" => {
791                         let ty = substs.type_at(0);
792                         if int_type_width_signed(ty, self).is_some() {
793                             let size = self.size_of(ty);
794                             self.atomic_load(args[0].immediate(), order, size)
795                         } else {
796                             return invalid_monomorphization(ty);
797                         }
798                     }
799
800                     "store" => {
801                         let ty = substs.type_at(0);
802                         if int_type_width_signed(ty, self).is_some() {
803                             let size = self.size_of(ty);
804                             self.atomic_store(
805                                 args[1].immediate(),
806                                 args[0].immediate(),
807                                 order,
808                                 size,
809                             );
810                             return;
811                         } else {
812                             return invalid_monomorphization(ty);
813                         }
814                     }
815
816                     "fence" => {
817                         self.atomic_fence(order, SynchronizationScope::CrossThread);
818                         return;
819                     }
820
821                     "singlethreadfence" => {
822                         self.atomic_fence(order, SynchronizationScope::SingleThread);
823                         return;
824                     }
825
826                     // These are all AtomicRMW ops
827                     op => {
828                         let atom_op = match op {
829                             "xchg" => AtomicRmwBinOp::AtomicXchg,
830                             "xadd" => AtomicRmwBinOp::AtomicAdd,
831                             "xsub" => AtomicRmwBinOp::AtomicSub,
832                             "and" => AtomicRmwBinOp::AtomicAnd,
833                             "nand" => AtomicRmwBinOp::AtomicNand,
834                             "or" => AtomicRmwBinOp::AtomicOr,
835                             "xor" => AtomicRmwBinOp::AtomicXor,
836                             "max" => AtomicRmwBinOp::AtomicMax,
837                             "min" => AtomicRmwBinOp::AtomicMin,
838                             "umax" => AtomicRmwBinOp::AtomicUMax,
839                             "umin" => AtomicRmwBinOp::AtomicUMin,
840                             _ => self.sess().fatal("unknown atomic operation"),
841                         };
842
843                         let ty = substs.type_at(0);
844                         if int_type_width_signed(ty, self).is_some() {
845                             self.atomic_rmw(
846                                 atom_op,
847                                 args[0].immediate(),
848                                 args[1].immediate(),
849                                 order,
850                             )
851                         } else {
852                             return invalid_monomorphization(ty);
853                         }
854                     }
855                 }
856             }
857
858             sym::nontemporal_store => {
859                 let dst = args[0].deref(self.cx());
860                 args[1].val.nontemporal_store(self, dst);
861                 return;
862             }
863
864             sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
865                 let a = args[0].immediate();
866                 let b = args[1].immediate();
867                 if name == sym::ptr_guaranteed_eq {
868                     self.icmp(IntPredicate::IntEQ, a, b)
869                 } else {
870                     self.icmp(IntPredicate::IntNE, a, b)
871                 }
872             }
873
874             sym::ptr_offset_from => {
875                 let ty = substs.type_at(0);
876                 let pointee_size = self.size_of(ty);
877
878                 // This is the same sequence that Clang emits for pointer subtraction.
879                 // It can be neither `nsw` nor `nuw` because the input is treated as
880                 // unsigned but then the output is treated as signed, so neither works.
881                 let a = args[0].immediate();
882                 let b = args[1].immediate();
883                 let a = self.ptrtoint(a, self.type_isize());
884                 let b = self.ptrtoint(b, self.type_isize());
885                 let d = self.sub(a, b);
886                 let pointee_size = self.const_usize(pointee_size.bytes());
887                 // this is where the signed magic happens (notice the `s` in `exactsdiv`)
888                 self.exactsdiv(d, pointee_size)
889             }
890
891             _ => bug!("unknown intrinsic '{}'", name),
892         };
893
894         if !fn_abi.ret.is_ignore() {
895             if let PassMode::Cast(ty) = fn_abi.ret.mode {
896                 let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
897                 let ptr = self.pointercast(result.llval, ptr_llty);
898                 self.store(llval, ptr, result.align);
899             } else {
900                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
901                     .val
902                     .store(self, result);
903             }
904         }
905     }
906
907     fn abort(&mut self) {
908         let fnname = self.get_intrinsic(&("llvm.trap"));
909         self.call(fnname, &[], None);
910     }
911
912     fn assume(&mut self, val: Self::Value) {
913         let assume_intrinsic = self.get_intrinsic("llvm.assume");
914         self.call(assume_intrinsic, &[val], None);
915     }
916
917     fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
918         let expect = self.get_intrinsic(&"llvm.expect.i1");
919         self.call(expect, &[cond, self.const_bool(expected)], None)
920     }
921
922     fn sideeffect(&mut self) {
923         if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
924             let fnname = self.get_intrinsic(&("llvm.sideeffect"));
925             self.call(fnname, &[], None);
926         }
927     }
928
929     fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
930         let intrinsic = self.cx().get_intrinsic("llvm.va_start");
931         self.call(intrinsic, &[va_list], None)
932     }
933
934     fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
935         let intrinsic = self.cx().get_intrinsic("llvm.va_end");
936         self.call(intrinsic, &[va_list], None)
937     }
938 }
939
940 fn copy_intrinsic(
941     bx: &mut Builder<'a, 'll, 'tcx>,
942     allow_overlap: bool,
943     volatile: bool,
944     ty: Ty<'tcx>,
945     dst: &'ll Value,
946     src: &'ll Value,
947     count: &'ll Value,
948 ) {
949     let (size, align) = bx.size_and_align_of(ty);
950     let size = bx.mul(bx.const_usize(size.bytes()), count);
951     let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
952     if allow_overlap {
953         bx.memmove(dst, align, src, align, size, flags);
954     } else {
955         bx.memcpy(dst, align, src, align, size, flags);
956     }
957 }
958
959 fn memset_intrinsic(
960     bx: &mut Builder<'a, 'll, 'tcx>,
961     volatile: bool,
962     ty: Ty<'tcx>,
963     dst: &'ll Value,
964     val: &'ll Value,
965     count: &'ll Value,
966 ) {
967     let (size, align) = bx.size_and_align_of(ty);
968     let size = bx.mul(bx.const_usize(size.bytes()), count);
969     let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
970     bx.memset(dst, val, size, align, flags);
971 }
972
973 fn try_intrinsic(
974     bx: &mut Builder<'a, 'll, 'tcx>,
975     try_func: &'ll Value,
976     data: &'ll Value,
977     catch_func: &'ll Value,
978     dest: &'ll Value,
979 ) {
980     if bx.sess().panic_strategy() == PanicStrategy::Abort {
981         bx.call(try_func, &[data], None);
982         // Return 0 unconditionally from the intrinsic call;
983         // we can never unwind.
984         let ret_align = bx.tcx().data_layout.i32_align.abi;
985         bx.store(bx.const_i32(0), dest, ret_align);
986     } else if wants_msvc_seh(bx.sess()) {
987         codegen_msvc_try(bx, try_func, data, catch_func, dest);
988     } else {
989         codegen_gnu_try(bx, try_func, data, catch_func, dest);
990     }
991 }
992
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
//
// Calls the cached `__rust_try` shim and stores its i32 result
// (0 = no panic, 1 = exception caught) into `dest`.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    // The shim body in this closure is generated at most once per codegen
    // unit; `get_rust_try_fn` caches the resulting function.
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());
        bx.sideeffect();

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        // These shadow the outer parameters: inside the shim we must use the
        // shim's own three arguments, not the captured outer values.
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca u8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          uint64_t x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(bx.type_i8p(), ptr_align);
        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge duplicate definitions in the same
        // module.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in src/libpanic_unwind/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
        // SAFETY: raw LLVM-C calls configuring linkage/comdat/initializer on
        // the global we just declared above.
        unsafe {
            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
            llvm::SetUniqueComdat(bx.llmod, tydesc);
            llvm::LLVMSetInitializer(tydesc, type_info);
        }

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        let flags = bx.const_i32(8);
        let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = catchpad.load(slot, ptr_align);
        catchpad.call(catch_func, &[data, ptr], Some(&funclet));

        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
1125
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
//
// Calls the cached `__rust_try` shim and stores its i32 result
// (0 = no panic, 1 = exception caught) into `dest`.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    // The shim body in this closure is generated at most once per codegen
    // unit; `get_rust_try_fn` caches the resulting function.
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1

        bx.sideeffect();

        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        // These shadow the outer parameters: inside the shim we must use the
        // shim's own three arguments, not the captured outer values.
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown.  The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        // Catch only Rust panics: use the `eh_catch_typeinfo` lang item when
        // the target defines one, otherwise a null typeinfo (catch-all).
        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(tydesc) => {
                let tydesc = bx.get_static(tydesc);
                bx.bitcast(tydesc, bx.type_i8p())
            }
            None => bx.const_null(bx.type_i8p()),
        };
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        catch.call(catch_func, &[data, ptr], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
1196
1197 // Helper function to give a Block to a closure to codegen a shim function.
1198 // This is currently primarily used for the `try` intrinsic functions above.
1199 fn gen_fn<'ll, 'tcx>(
1200     cx: &CodegenCx<'ll, 'tcx>,
1201     name: &str,
1202     inputs: Vec<Ty<'tcx>>,
1203     output: Ty<'tcx>,
1204     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
1205 ) -> &'ll Value {
1206     let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
1207         inputs.into_iter(),
1208         output,
1209         false,
1210         hir::Unsafety::Unsafe,
1211         Abi::Rust,
1212     ));
1213     let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
1214     let llfn = cx.declare_fn(name, &fn_abi);
1215     cx.set_frame_pointer_elimination(llfn);
1216     cx.apply_target_cpu_attr(llfn);
1217     // FIXME(eddyb) find a nicer way to do this.
1218     unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
1219     let bx = Builder::new_block(cx, llfn, "entry-block");
1220     codegen(bx);
1221     llfn
1222 }
1223
1224 // Helper function used to get a handle to the `__rust_try` function used to
1225 // catch exceptions.
1226 //
1227 // This function is only generated once and is then cached.
1228 fn get_rust_try_fn<'ll, 'tcx>(
1229     cx: &CodegenCx<'ll, 'tcx>,
1230     codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
1231 ) -> &'ll Value {
1232     if let Some(llfn) = cx.rust_try_fn.get() {
1233         return llfn;
1234     }
1235
1236     // Define the type up front for the signature of the rust_try function.
1237     let tcx = cx.tcx;
1238     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1239     let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
1240         iter::once(i8p),
1241         tcx.mk_unit(),
1242         false,
1243         hir::Unsafety::Unsafe,
1244         Abi::Rust,
1245     )));
1246     let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
1247         [i8p, i8p].iter().cloned(),
1248         tcx.mk_unit(),
1249         false,
1250         hir::Unsafety::Unsafe,
1251         Abi::Rust,
1252     )));
1253     let output = tcx.types.i32;
1254     let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
1255     cx.rust_try_fn.set(Some(rust_try));
1256     rust_try
1257 }
1258
1259 fn generic_simd_intrinsic(
1260     bx: &mut Builder<'a, 'll, 'tcx>,
1261     name: Symbol,
1262     callee_ty: Ty<'tcx>,
1263     args: &[OperandRef<'tcx, &'ll Value>],
1264     ret_ty: Ty<'tcx>,
1265     llret_ty: &'ll Type,
1266     span: Span,
1267 ) -> Result<&'ll Value, ()> {
1268     // macros for error handling:
1269     macro_rules! emit_error {
1270         ($msg: tt) => {
1271             emit_error!($msg, )
1272         };
1273         ($msg: tt, $($fmt: tt)*) => {
1274             span_invalid_monomorphization_error(
1275                 bx.sess(), span,
1276                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
1277                          name, $($fmt)*));
1278         }
1279     }
1280
1281     macro_rules! return_error {
1282         ($($fmt: tt)*) => {
1283             {
1284                 emit_error!($($fmt)*);
1285                 return Err(());
1286             }
1287         }
1288     }
1289
1290     macro_rules! require {
1291         ($cond: expr, $($fmt: tt)*) => {
1292             if !$cond {
1293                 return_error!($($fmt)*);
1294             }
1295         };
1296     }
1297
1298     macro_rules! require_simd {
1299         ($ty: expr, $position: expr) => {
1300             require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1301         };
1302     }
1303
1304     let tcx = bx.tcx();
1305     let sig = tcx
1306         .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx));
1307     let arg_tys = sig.inputs();
1308     let name_str = &*name.as_str();
1309
1310     if name == sym::simd_select_bitmask {
1311         let in_ty = arg_tys[0];
1312         let m_len = match in_ty.kind {
1313             // Note that this `.unwrap()` crashes for isize/usize, that's sort
1314             // of intentional as there's not currently a use case for that.
1315             ty::Int(i) => i.bit_width().unwrap(),
1316             ty::Uint(i) => i.bit_width().unwrap(),
1317             _ => return_error!("`{}` is not an integral type", in_ty),
1318         };
1319         require_simd!(arg_tys[1], "argument");
1320         let v_len = arg_tys[1].simd_size(tcx);
1321         require!(
1322             m_len == v_len,
1323             "mismatched lengths: mask length `{}` != other vector length `{}`",
1324             m_len,
1325             v_len
1326         );
1327         let i1 = bx.type_i1();
1328         let i1xn = bx.type_vector(i1, m_len);
1329         let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
1330         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1331     }
1332
1333     // every intrinsic below takes a SIMD vector as its first argument
1334     require_simd!(arg_tys[0], "input");
1335     let in_ty = arg_tys[0];
1336     let in_elem = arg_tys[0].simd_type(tcx);
1337     let in_len = arg_tys[0].simd_size(tcx);
1338
1339     let comparison = match name {
1340         sym::simd_eq => Some(hir::BinOpKind::Eq),
1341         sym::simd_ne => Some(hir::BinOpKind::Ne),
1342         sym::simd_lt => Some(hir::BinOpKind::Lt),
1343         sym::simd_le => Some(hir::BinOpKind::Le),
1344         sym::simd_gt => Some(hir::BinOpKind::Gt),
1345         sym::simd_ge => Some(hir::BinOpKind::Ge),
1346         _ => None,
1347     };
1348
1349     if let Some(cmp_op) = comparison {
1350         require_simd!(ret_ty, "return");
1351
1352         let out_len = ret_ty.simd_size(tcx);
1353         require!(
1354             in_len == out_len,
1355             "expected return type with length {} (same as input type `{}`), \
1356                   found `{}` with length {}",
1357             in_len,
1358             in_ty,
1359             ret_ty,
1360             out_len
1361         );
1362         require!(
1363             bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
1364             "expected return type with integer elements, found `{}` with non-integer `{}`",
1365             ret_ty,
1366             ret_ty.simd_type(tcx)
1367         );
1368
1369         return Ok(compare_simd_types(
1370             bx,
1371             args[0].immediate(),
1372             args[1].immediate(),
1373             in_elem,
1374             llret_ty,
1375             cmp_op,
1376         ));
1377     }
1378
1379     if name_str.starts_with("simd_shuffle") {
1380         let n: u64 = name_str["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
1381             span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
1382         });
1383
1384         require_simd!(ret_ty, "return");
1385
1386         let out_len = ret_ty.simd_size(tcx);
1387         require!(
1388             out_len == n,
1389             "expected return type of length {}, found `{}` with length {}",
1390             n,
1391             ret_ty,
1392             out_len
1393         );
1394         require!(
1395             in_elem == ret_ty.simd_type(tcx),
1396             "expected return element type `{}` (element of input `{}`), \
1397                   found `{}` with element type `{}`",
1398             in_elem,
1399             in_ty,
1400             ret_ty,
1401             ret_ty.simd_type(tcx)
1402         );
1403
1404         let total_len = u128::from(in_len) * 2;
1405
1406         let vector = args[2].immediate();
1407
1408         let indices: Option<Vec<_>> = (0..n)
1409             .map(|i| {
1410                 let arg_idx = i;
1411                 let val = bx.const_get_elt(vector, i as u64);
1412                 match bx.const_to_opt_u128(val, true) {
1413                     None => {
1414                         emit_error!("shuffle index #{} is not a constant", arg_idx);
1415                         None
1416                     }
1417                     Some(idx) if idx >= total_len => {
1418                         emit_error!(
1419                             "shuffle index #{} is out of bounds (limit {})",
1420                             arg_idx,
1421                             total_len
1422                         );
1423                         None
1424                     }
1425                     Some(idx) => Some(bx.const_i32(idx as i32)),
1426                 }
1427             })
1428             .collect();
1429         let indices = match indices {
1430             Some(i) => i,
1431             None => return Ok(bx.const_null(llret_ty)),
1432         };
1433
1434         return Ok(bx.shuffle_vector(
1435             args[0].immediate(),
1436             args[1].immediate(),
1437             bx.const_vector(&indices),
1438         ));
1439     }
1440
1441     if name == sym::simd_insert {
1442         require!(
1443             in_elem == arg_tys[2],
1444             "expected inserted type `{}` (element of input `{}`), found `{}`",
1445             in_elem,
1446             in_ty,
1447             arg_tys[2]
1448         );
1449         return Ok(bx.insert_element(
1450             args[0].immediate(),
1451             args[2].immediate(),
1452             args[1].immediate(),
1453         ));
1454     }
1455     if name == sym::simd_extract {
1456         require!(
1457             ret_ty == in_elem,
1458             "expected return type `{}` (element of input `{}`), found `{}`",
1459             in_elem,
1460             in_ty,
1461             ret_ty
1462         );
1463         return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
1464     }
1465
1466     if name == sym::simd_select {
1467         let m_elem_ty = in_elem;
1468         let m_len = in_len;
1469         require_simd!(arg_tys[1], "argument");
1470         let v_len = arg_tys[1].simd_size(tcx);
1471         require!(
1472             m_len == v_len,
1473             "mismatched lengths: mask length `{}` != other vector length `{}`",
1474             m_len,
1475             v_len
1476         );
1477         match m_elem_ty.kind {
1478             ty::Int(_) => {}
1479             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
1480         }
1481         // truncate the mask to a vector of i1s
1482         let i1 = bx.type_i1();
1483         let i1xn = bx.type_vector(i1, m_len as u64);
1484         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
1485         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
1486     }
1487
1488     if name == sym::simd_bitmask {
1489         // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
1490         // vector mask and returns an unsigned integer containing the most
1491         // significant bit (MSB) of each lane.
1492
1493         // If the vector has less than 8 lanes, an u8 is returned with zeroed
1494         // trailing bits.
1495         let expected_int_bits = in_len.max(8);
1496         match ret_ty.kind {
1497             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
1498             _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
1499         }
1500
1501         // Integer vector <i{in_bitwidth} x in_len>:
1502         let (i_xn, in_elem_bitwidth) = match in_elem.kind {
1503             ty::Int(i) => {
1504                 (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
1505             }
1506             ty::Uint(i) => {
1507                 (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
1508             }
1509             _ => return_error!(
1510                 "vector argument `{}`'s element type `{}`, expected integer element type",
1511                 in_ty,
1512                 in_elem
1513             ),
1514         };
1515
1516         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
1517         let shift_indices =
1518             vec![
1519                 bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1520                 in_len as _
1521             ];
1522         let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1523         // Truncate vector to an <i1 x N>
1524         let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
1525         // Bitcast <i1 x N> to iN:
1526         let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
1527         // Zero-extend iN to the bitmask type:
1528         return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
1529     }
1530
    /// Lowers a "simple" SIMD float intrinsic (e.g. `simd_fsqrt`, `simd_fsin`)
    /// to a call of the corresponding LLVM vector intrinsic
    /// `llvm.<name>.v<in_len><ety>`.
    ///
    /// * `name` - the LLVM base name of the operation (e.g. `"sqrt"`).
    /// * `in_elem` / `in_ty` / `in_len` - element type, vector type, and lane
    ///   count of the input vector (used for validation and name mangling).
    /// * `args` - the operands, forwarded unchanged to the LLVM call.
    ///
    /// Emits an invalid-monomorphization error and returns `Err(())` when the
    /// element type is not `f32`/`f64` or the lane count is out of range.
    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc_middle::ty::TyS<'_>,
        in_ty: &::rustc_middle::ty::TyS<'_>,
        in_len: u64,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        // Reports an invalid-monomorphization error at `span`, prefixing the
        // message with the intrinsic name.
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            }
        }
        // Like `emit_error!`, but also returns `Err(())` from this function.
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }
        // Map the element type to the LLVM type suffix used in the mangled
        // intrinsic name, validating the lane count along the way.
        // NOTE(review): the [2, 16] / [2, 8] lane ranges appear chosen to match
        // the vector widths LLVM supports for these intrinsics — confirm.
        let ety = match in_elem.kind {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty,
                        in_len
                    );
                }
                "f32"
            }
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                                   out-of-range [2, 8]",
                        in_ty,
                        in_len
                    );
                }
                "f64"
            }
            ty::Float(f) => {
                return_error!(
                    "unsupported element type `{}` of floating-point vector `{}`",
                    f.name_str(),
                    in_ty
                );
            }
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        // e.g. "llvm.sqrt.v4f32" for a sqrt over a 4-lane f32 vector.
        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.get_intrinsic(&llvm_name);
        let c =
            bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
        // Mark the freshly built call instruction with LLVM's "unsafe algebra"
        // (fast-math) flag; `c` is valid as it was just created above.
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }
1601
1602     match name {
1603         sym::simd_fsqrt => {
1604             return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
1605         }
1606         sym::simd_fsin => {
1607             return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
1608         }
1609         sym::simd_fcos => {
1610             return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
1611         }
1612         sym::simd_fabs => {
1613             return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
1614         }
1615         sym::simd_floor => {
1616             return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
1617         }
1618         sym::simd_ceil => {
1619             return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
1620         }
1621         sym::simd_fexp => {
1622             return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
1623         }
1624         sym::simd_fexp2 => {
1625             return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
1626         }
1627         sym::simd_flog10 => {
1628             return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
1629         }
1630         sym::simd_flog2 => {
1631             return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
1632         }
1633         sym::simd_flog => {
1634             return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
1635         }
1636         sym::simd_fpowi => {
1637             return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
1638         }
1639         sym::simd_fpow => {
1640             return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
1641         }
1642         sym::simd_fma => {
1643             return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
1644         }
1645         _ => { /* fallthrough */ }
1646     }
1647
1648     // FIXME: use:
1649     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
1650     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
1651     fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
1652         let p0s: String = "p0".repeat(no_pointers);
1653         match elem_ty.kind {
1654             ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1655             ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
1656             ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
1657             _ => unreachable!(),
1658         }
1659     }
1660
1661     fn llvm_vector_ty(
1662         cx: &CodegenCx<'ll, '_>,
1663         elem_ty: Ty<'_>,
1664         vec_len: u64,
1665         mut no_pointers: usize,
1666     ) -> &'ll Type {
1667         // FIXME: use cx.layout_of(ty).llvm_type() ?
1668         let mut elem_ty = match elem_ty.kind {
1669             ty::Int(v) => cx.type_int_from_ty(v),
1670             ty::Uint(v) => cx.type_uint_from_ty(v),
1671             ty::Float(v) => cx.type_float_from_ty(v),
1672             _ => unreachable!(),
1673         };
1674         while no_pointers > 0 {
1675             elem_ty = cx.type_ptr_to(elem_ty);
1676             no_pointers -= 1;
1677         }
1678         cx.type_vector(elem_ty, vec_len)
1679     }
1680
1681     if name == sym::simd_gather {
1682         // simd_gather(values: <N x T>, pointers: <N x *_ T>,
1683         //             mask: <N x i{M}>) -> <N x T>
1684         // * N: number of elements in the input vectors
1685         // * T: type of the element to load
1686         // * M: any integer width is supported, will be truncated to i1
1687
1688         // All types must be simd vector types
1689         require_simd!(in_ty, "first");
1690         require_simd!(arg_tys[1], "second");
1691         require_simd!(arg_tys[2], "third");
1692         require_simd!(ret_ty, "return");
1693
1694         // Of the same length:
1695         require!(
1696             in_len == arg_tys[1].simd_size(tcx),
1697             "expected {} argument with length {} (same as input type `{}`), \
1698                   found `{}` with length {}",
1699             "second",
1700             in_len,
1701             in_ty,
1702             arg_tys[1],
1703             arg_tys[1].simd_size(tcx)
1704         );
1705         require!(
1706             in_len == arg_tys[2].simd_size(tcx),
1707             "expected {} argument with length {} (same as input type `{}`), \
1708                   found `{}` with length {}",
1709             "third",
1710             in_len,
1711             in_ty,
1712             arg_tys[2],
1713             arg_tys[2].simd_size(tcx)
1714         );
1715
1716         // The return type must match the first argument type
1717         require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
1718
1719         // This counts how many pointers
1720         fn ptr_count(t: Ty<'_>) -> usize {
1721             match t.kind {
1722                 ty::RawPtr(p) => 1 + ptr_count(p.ty),
1723                 _ => 0,
1724             }
1725         }
1726
1727         // Non-ptr type
1728         fn non_ptr(t: Ty<'_>) -> Ty<'_> {
1729             match t.kind {
1730                 ty::RawPtr(p) => non_ptr(p.ty),
1731                 _ => t,
1732             }
1733         }
1734
1735         // The second argument must be a simd vector with an element type that's a pointer
1736         // to the element type of the first argument
1737         let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
1738             ty::RawPtr(p) if p.ty == in_elem => {
1739                 (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
1740             }
1741             _ => {
1742                 require!(
1743                     false,
1744                     "expected element type `{}` of second argument `{}` \
1745                                  to be a pointer to the element type `{}` of the first \
1746                                  argument `{}`, found `{}` != `*_ {}`",
1747                     arg_tys[1].simd_type(tcx),
1748                     arg_tys[1],
1749                     in_elem,
1750                     in_ty,
1751                     arg_tys[1].simd_type(tcx),
1752                     in_elem
1753                 );
1754                 unreachable!();
1755             }
1756         };
1757         assert!(pointer_count > 0);
1758         assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
1759         assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1760
1761         // The element type of the third argument must be a signed integer type of any width:
1762         match arg_tys[2].simd_type(tcx).kind {
1763             ty::Int(_) => (),
1764             _ => {
1765                 require!(
1766                     false,
1767                     "expected element type `{}` of third argument `{}` \
1768                                  to be a signed integer type",
1769                     arg_tys[2].simd_type(tcx),
1770                     arg_tys[2]
1771                 );
1772             }
1773         }
1774
1775         // Alignment of T, must be a constant integer value:
1776         let alignment_ty = bx.type_i32();
1777         let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1778
1779         // Truncate the mask vector to a vector of i1s:
1780         let (mask, mask_ty) = {
1781             let i1 = bx.type_i1();
1782             let i1xn = bx.type_vector(i1, in_len);
1783             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1784         };
1785
1786         // Type of the vector of pointers:
1787         let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1788         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1789
1790         // Type of the vector of elements:
1791         let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1792         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1793
1794         let llvm_intrinsic =
1795             format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1796         let f = bx.declare_cfn(
1797             &llvm_intrinsic,
1798             bx.type_func(
1799                 &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
1800                 llvm_elem_vec_ty,
1801             ),
1802         );
1803         llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
1804         let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
1805         return Ok(v);
1806     }
1807
1808     if name == sym::simd_scatter {
1809         // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
1810         //             mask: <N x i{M}>) -> ()
1811         // * N: number of elements in the input vectors
1812         // * T: type of the element to load
1813         // * M: any integer width is supported, will be truncated to i1
1814
1815         // All types must be simd vector types
1816         require_simd!(in_ty, "first");
1817         require_simd!(arg_tys[1], "second");
1818         require_simd!(arg_tys[2], "third");
1819
1820         // Of the same length:
1821         require!(
1822             in_len == arg_tys[1].simd_size(tcx),
1823             "expected {} argument with length {} (same as input type `{}`), \
1824                   found `{}` with length {}",
1825             "second",
1826             in_len,
1827             in_ty,
1828             arg_tys[1],
1829             arg_tys[1].simd_size(tcx)
1830         );
1831         require!(
1832             in_len == arg_tys[2].simd_size(tcx),
1833             "expected {} argument with length {} (same as input type `{}`), \
1834                   found `{}` with length {}",
1835             "third",
1836             in_len,
1837             in_ty,
1838             arg_tys[2],
1839             arg_tys[2].simd_size(tcx)
1840         );
1841
        // Counts the levels of raw-pointer indirection wrapping `t`
        // (e.g. `*mut *mut f32` -> 2, `f32` -> 0).
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }
1849
        // Strips every raw-pointer layer from `t`, returning the innermost
        // pointee type (e.g. `*mut *mut f32` -> `f32`).
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }
1857
1858         // The second argument must be a simd vector with an element type that's a pointer
1859         // to the element type of the first argument
1860         let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
1861             ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
1862                 (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
1863             }
1864             _ => {
1865                 require!(
1866                     false,
1867                     "expected element type `{}` of second argument `{}` \
1868                                  to be a pointer to the element type `{}` of the first \
1869                                  argument `{}`, found `{}` != `*mut {}`",
1870                     arg_tys[1].simd_type(tcx),
1871                     arg_tys[1],
1872                     in_elem,
1873                     in_ty,
1874                     arg_tys[1].simd_type(tcx),
1875                     in_elem
1876                 );
1877                 unreachable!();
1878             }
1879         };
1880         assert!(pointer_count > 0);
1881         assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
1882         assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
1883
1884         // The element type of the third argument must be a signed integer type of any width:
1885         match arg_tys[2].simd_type(tcx).kind {
1886             ty::Int(_) => (),
1887             _ => {
1888                 require!(
1889                     false,
1890                     "expected element type `{}` of third argument `{}` \
1891                                  to be a signed integer type",
1892                     arg_tys[2].simd_type(tcx),
1893                     arg_tys[2]
1894                 );
1895             }
1896         }
1897
1898         // Alignment of T, must be a constant integer value:
1899         let alignment_ty = bx.type_i32();
1900         let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
1901
1902         // Truncate the mask vector to a vector of i1s:
1903         let (mask, mask_ty) = {
1904             let i1 = bx.type_i1();
1905             let i1xn = bx.type_vector(i1, in_len);
1906             (bx.trunc(args[2].immediate(), i1xn), i1xn)
1907         };
1908
1909         let ret_t = bx.type_void();
1910
1911         // Type of the vector of pointers:
1912         let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
1913         let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
1914
1915         // Type of the vector of elements:
1916         let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
1917         let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
1918
1919         let llvm_intrinsic =
1920             format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
1921         let f = bx.declare_cfn(
1922             &llvm_intrinsic,
1923             bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
1924         );
1925         llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
1926         let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
1927         return Ok(v);
1928     }
1929
1930     macro_rules! arith_red {
1931         ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
1932          $identity:expr) => {
1933             if name == sym::$name {
1934                 require!(
1935                     ret_ty == in_elem,
1936                     "expected return type `{}` (element of input `{}`), found `{}`",
1937                     in_elem,
1938                     in_ty,
1939                     ret_ty
1940                 );
1941                 return match in_elem.kind {
1942                     ty::Int(_) | ty::Uint(_) => {
1943                         let r = bx.$integer_reduce(args[0].immediate());
1944                         if $ordered {
1945                             // if overflow occurs, the result is the
1946                             // mathematical result modulo 2^n:
1947                             Ok(bx.$op(args[1].immediate(), r))
1948                         } else {
1949                             Ok(bx.$integer_reduce(args[0].immediate()))
1950                         }
1951                     }
1952                     ty::Float(f) => {
1953                         let acc = if $ordered {
1954                             // ordered arithmetic reductions take an accumulator
1955                             args[1].immediate()
1956                         } else {
1957                             // unordered arithmetic reductions use the identity accumulator
1958                             match f.bit_width() {
1959                                 32 => bx.const_real(bx.type_f32(), $identity),
1960                                 64 => bx.const_real(bx.type_f64(), $identity),
1961                                 v => return_error!(
1962                                     r#"
1963 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
1964                                     sym::$name,
1965                                     in_ty,
1966                                     in_elem,
1967                                     v,
1968                                     ret_ty
1969                                 ),
1970                             }
1971                         };
1972                         Ok(bx.$float_reduce(acc, args[0].immediate()))
1973                     }
1974                     _ => return_error!(
1975                         "unsupported {} from `{}` with element `{}` to `{}`",
1976                         sym::$name,
1977                         in_ty,
1978                         in_elem,
1979                         ret_ty
1980                     ),
1981                 };
1982             }
1983         };
1984     }
1985
1986     arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
1987     arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
1988     arith_red!(
1989         simd_reduce_add_unordered: vector_reduce_add,
1990         vector_reduce_fadd_fast,
1991         false,
1992         add,
1993         0.0
1994     );
1995     arith_red!(
1996         simd_reduce_mul_unordered: vector_reduce_mul,
1997         vector_reduce_fmul_fast,
1998         false,
1999         mul,
2000         1.0
2001     );
2002
    // Generates the handler for one min/max reduction intrinsic. Expands in
    // place, capturing `name`, `bx`, `args`, `in_elem`, `in_ty` and `ret_ty`
    // from the enclosing function.
    //
    // * `$int_red` - integer reduction builder method; its boolean argument
    //   selects signed (`true`) vs unsigned (`false`) comparison.
    // * `$float_red` - floating-point reduction builder method.
    macro_rules! minmax_red {
        ($name:ident: $int_red:ident, $float_red:ident) => {
            if name == sym::$name {
                // The scalar result must have the vector's element type.
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind {
                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }
2028
2029     minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2030     minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2031
2032     minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
2033     minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
2034
    // Generates the handler for one bitwise/boolean reduction intrinsic.
    // Expands in place, capturing `name`, `bx`, `args`, `in_elem`, `in_ty`,
    // `in_len` and `ret_ty` from the enclosing function.
    //
    // * `$red` - the vector reduction builder method (and/or/xor).
    // * `$boolean` - `false` for element-typed reductions (`simd_reduce_and`
    //   etc., which return the element type); `true` for the `all`/`any`
    //   forms, which reduce a vector of i1s and return a zero-extended bool.
    macro_rules! bitwise_red {
        ($name:ident : $red:ident, $boolean:expr) => {
            if name == sym::$name {
                let input = if !$boolean {
                    // Element-typed reduction: result has the element type.
                    require!(
                        ret_ty == in_elem,
                        "expected return type `{}` (element of input `{}`), found `{}`",
                        in_elem,
                        in_ty,
                        ret_ty
                    );
                    args[0].immediate()
                } else {
                    // Boolean reduction: only integer element vectors allowed.
                    match in_elem.kind {
                        ty::Int(_) | ty::Uint(_) => {}
                        _ => return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            sym::$name,
                            in_ty,
                            in_elem,
                            ret_ty
                        ),
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        // Boolean forms widen the i1 result back to bool.
                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }
2080
2081     bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2082     bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2083     bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2084     bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2085     bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2086
2087     if name == sym::simd_cast {
2088         require_simd!(ret_ty, "return");
2089         let out_len = ret_ty.simd_size(tcx);
2090         require!(
2091             in_len == out_len,
2092             "expected return type with length {} (same as input type `{}`), \
2093                   found `{}` with length {}",
2094             in_len,
2095             in_ty,
2096             ret_ty,
2097             out_len
2098         );
2099         // casting cares about nominal type, not just structural type
2100         let out_elem = ret_ty.simd_type(tcx);
2101
2102         if in_elem == out_elem {
2103             return Ok(args[0].immediate());
2104         }
2105
2106         enum Style {
2107             Float,
2108             Int(/* is signed? */ bool),
2109             Unsupported,
2110         }
2111
2112         let (in_style, in_width) = match in_elem.kind {
2113             // vectors of pointer-sized integers should've been
2114             // disallowed before here, so this unwrap is safe.
2115             ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
2116             ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
2117             ty::Float(f) => (Style::Float, f.bit_width()),
2118             _ => (Style::Unsupported, 0),
2119         };
2120         let (out_style, out_width) = match out_elem.kind {
2121             ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
2122             ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
2123             ty::Float(f) => (Style::Float, f.bit_width()),
2124             _ => (Style::Unsupported, 0),
2125         };
2126
2127         match (in_style, out_style) {
2128             (Style::Int(in_is_signed), Style::Int(_)) => {
2129                 return Ok(match in_width.cmp(&out_width) {
2130                     Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2131                     Ordering::Equal => args[0].immediate(),
2132                     Ordering::Less => {
2133                         if in_is_signed {
2134                             bx.sext(args[0].immediate(), llret_ty)
2135                         } else {
2136                             bx.zext(args[0].immediate(), llret_ty)
2137                         }
2138                     }
2139                 });
2140             }
2141             (Style::Int(in_is_signed), Style::Float) => {
2142                 return Ok(if in_is_signed {
2143                     bx.sitofp(args[0].immediate(), llret_ty)
2144                 } else {
2145                     bx.uitofp(args[0].immediate(), llret_ty)
2146                 });
2147             }
2148             (Style::Float, Style::Int(out_is_signed)) => {
2149                 return Ok(if out_is_signed {
2150                     bx.fptosi(args[0].immediate(), llret_ty)
2151                 } else {
2152                     bx.fptoui(args[0].immediate(), llret_ty)
2153                 });
2154             }
2155             (Style::Float, Style::Float) => {
2156                 return Ok(match in_width.cmp(&out_width) {
2157                     Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2158                     Ordering::Equal => args[0].immediate(),
2159                     Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2160                 });
2161             }
2162             _ => { /* Unsupported. Fallthrough. */ }
2163         }
2164         require!(
2165             false,
2166             "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
2167             in_ty,
2168             in_elem,
2169             ret_ty,
2170             out_elem
2171         );
2172     }
2173     macro_rules! arith {
2174         ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
2175             $(if name == sym::$name {
2176                 match in_elem.kind {
2177                     $($(ty::$p(_))|* => {
2178                         return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
2179                     })*
2180                     _ => {},
2181                 }
2182                 require!(false,
2183                          "unsupported operation on `{}` with element `{}`",
2184                          in_ty,
2185                          in_elem)
2186             })*
2187         }
2188     }
2189     arith! {
2190         simd_add: Uint, Int => add, Float => fadd;
2191         simd_sub: Uint, Int => sub, Float => fsub;
2192         simd_mul: Uint, Int => mul, Float => fmul;
2193         simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
2194         simd_rem: Uint => urem, Int => srem, Float => frem;
2195         simd_shl: Uint, Int => shl;
2196         simd_shr: Uint => lshr, Int => ashr;
2197         simd_and: Uint, Int => and;
2198         simd_or: Uint, Int => or;
2199         simd_xor: Uint, Int => xor;
2200         simd_fmax: Float => maxnum;
2201         simd_fmin: Float => minnum;
2202
2203     }
2204
2205     if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
2206         let lhs = args[0].immediate();
2207         let rhs = args[1].immediate();
2208         let is_add = name == sym::simd_saturating_add;
2209         let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
2210         let (signed, elem_width, elem_ty) = match in_elem.kind {
2211             ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
2212             ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
2213             _ => {
2214                 return_error!(
2215                     "expected element type `{}` of vector type `{}` \
2216                      to be a signed or unsigned integer type",
2217                     arg_tys[0].simd_type(tcx),
2218                     arg_tys[0]
2219                 );
2220             }
2221         };
2222         let llvm_intrinsic = &format!(
2223             "llvm.{}{}.sat.v{}i{}",
2224             if signed { 's' } else { 'u' },
2225             if is_add { "add" } else { "sub" },
2226             in_len,
2227             elem_width
2228         );
2229         let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
2230
2231         let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
2232         llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
2233         let v = bx.call(f, &[lhs, rhs], None);
2234         return Ok(v);
2235     }
2236
2237     span_bug!(span, "unknown SIMD intrinsic");
2238 }
2239
2240 // Returns the width of an int Ty, and if it's signed or not
2241 // Returns None if the type is not an integer
2242 // FIXME: there’s multiple of this functions, investigate using some of the already existing
2243 // stuffs.
2244 fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
2245     match ty.kind {
2246         ty::Int(t) => Some((
2247             match t {
2248                 ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
2249                 ast::IntTy::I8 => 8,
2250                 ast::IntTy::I16 => 16,
2251                 ast::IntTy::I32 => 32,
2252                 ast::IntTy::I64 => 64,
2253                 ast::IntTy::I128 => 128,
2254             },
2255             true,
2256         )),
2257         ty::Uint(t) => Some((
2258             match t {
2259                 ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
2260                 ast::UintTy::U8 => 8,
2261                 ast::UintTy::U16 => 16,
2262                 ast::UintTy::U32 => 32,
2263                 ast::UintTy::U64 => 64,
2264                 ast::UintTy::U128 => 128,
2265             },
2266             false,
2267         )),
2268         _ => None,
2269     }
2270 }
2271
2272 // Returns the width of a float Ty
2273 // Returns None if the type is not a float
2274 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
2275     match ty.kind {
2276         ty::Float(t) => Some(t.bit_width()),
2277         _ => None,
2278     }
2279 }
2280
2281 fn op_to_u32<'tcx>(op: &Operand<'tcx>) -> u32 {
2282     Operand::scalar_from_const(op).to_u32().expect("Scalar is u32")
2283 }
2284
2285 fn op_to_u64<'tcx>(op: &Operand<'tcx>) -> u64 {
2286     Operand::scalar_from_const(op).to_u64().expect("Scalar is u64")
2287 }