git.lizzy.rs Git - rust.git/blob - src/abi.rs
Use FunctionBuilder::call_mem{cpy,move}
[rust.git] / src / abi.rs
1 use std::iter;
2
3 use crate::rustc::hir;
4 use crate::rustc_target::spec::abi::Abi;
5
6 use crate::prelude::*;
7
/// How a single value (argument or return value) is passed at the ABI level.
#[derive(Debug)]
enum PassMode {
    // The value occupies no parameter/return slot at all
    // (used for `()` and `!` in return position).
    NoPass,
    // The value is passed directly as one Cranelift value of the given type.
    ByVal(Type),
    // The value is passed indirectly through a pointer.
    ByRef,
}
14
15 impl PassMode {
16     fn get_param_ty(self, fx: &FunctionCx<impl Backend>) -> Type {
17         match self {
18             PassMode::NoPass => unimplemented!("pass mode nopass"),
19             PassMode::ByVal(cton_type) => cton_type,
20             PassMode::ByRef => fx.module.pointer_type(),
21         }
22     }
23 }
24
25 fn get_pass_mode<'a, 'tcx: 'a>(
26     tcx: TyCtxt<'a, 'tcx, 'tcx>,
27     abi: Abi,
28     ty: Ty<'tcx>,
29     is_return: bool,
30 ) -> PassMode {
31     assert!(
32         !tcx.layout_of(ParamEnv::reveal_all().and(ty))
33             .unwrap()
34             .is_unsized()
35     );
36     if let ty::Never = ty.sty {
37         if is_return {
38             PassMode::NoPass
39         } else {
40             PassMode::ByRef
41         }
42     } else if ty.sty == tcx.mk_unit().sty {
43         if is_return {
44             PassMode::NoPass
45         } else {
46             PassMode::ByRef
47         }
48     } else if let Some(ret_ty) = crate::common::cton_type_from_ty(tcx, ty) {
49         PassMode::ByVal(ret_ty)
50     } else {
51         if abi == Abi::C {
52             unimpl!(
53                 "Non scalars are not yet supported for \"C\" abi ({:?}) is_return: {:?}",
54                 ty,
55                 is_return
56             );
57         }
58         PassMode::ByRef
59     }
60 }
61
62 fn adjust_arg_for_abi<'a, 'tcx: 'a>(
63     fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
64     sig: FnSig<'tcx>,
65     arg: CValue<'tcx>,
66 ) -> Value {
67     match get_pass_mode(fx.tcx, sig.abi, arg.layout().ty, false) {
68         PassMode::NoPass => unimplemented!("pass mode nopass"),
69         PassMode::ByVal(_) => arg.load_value(fx),
70         PassMode::ByRef => arg.force_stack(fx),
71     }
72 }
73
74 pub fn cton_sig_from_fn_ty<'a, 'tcx: 'a>(
75     tcx: TyCtxt<'a, 'tcx, 'tcx>,
76     fn_ty: Ty<'tcx>,
77 ) -> Signature {
78     let sig = ty_fn_sig(tcx, fn_ty);
79     assert!(!sig.variadic, "Variadic function are not yet supported");
80     let (call_conv, inputs, output): (CallConv, Vec<Ty>, Ty) = match sig.abi {
81         Abi::Rust => (CallConv::Fast, sig.inputs().to_vec(), sig.output()),
82         Abi::C => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
83         Abi::RustCall => {
84             assert_eq!(sig.inputs().len(), 2);
85             let extra_args = match sig.inputs().last().unwrap().sty {
86                 ty::Tuple(ref tupled_arguments) => tupled_arguments,
87                 _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
88             };
89             let mut inputs: Vec<Ty> = vec![sig.inputs()[0]];
90             inputs.extend(extra_args.into_iter());
91             (CallConv::Fast, inputs, sig.output())
92         }
93         Abi::System => bug!("system abi should be selected elsewhere"),
94         Abi::RustIntrinsic => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
95         _ => unimplemented!("unsupported abi {:?}", sig.abi),
96     };
97
98     let inputs = inputs
99         .into_iter()
100         .filter_map(|ty| match get_pass_mode(tcx, sig.abi, ty, false) {
101             PassMode::ByVal(cton_ty) => Some(cton_ty),
102             PassMode::NoPass => unimplemented!("pass mode nopass"),
103             PassMode::ByRef => Some(pointer_ty(tcx)),
104         });
105
106     let (params, returns) = match get_pass_mode(tcx, sig.abi, output, true) {
107         PassMode::NoPass => (inputs.map(AbiParam::new).collect(), vec![]),
108         PassMode::ByVal(ret_ty) => (
109             inputs.map(AbiParam::new).collect(),
110             vec![AbiParam::new(ret_ty)],
111         ),
112         PassMode::ByRef => {
113             (
114                 Some(pointer_ty(tcx)) // First param is place to put return val
115                     .into_iter()
116                     .chain(inputs)
117                     .map(AbiParam::new)
118                     .collect(),
119                 vec![],
120             )
121         }
122     };
123
124     Signature {
125         params,
126         returns,
127         call_conv,
128     }
129 }
130
/// Returns the function signature for any callable type, with late-bound
/// regions erased and the result normalized under `ParamEnv::reveal_all()`.
///
/// `FnDef`/`FnPtr` use their own signature. Closures and generators get a
/// synthesized signature that prepends the environment (`self`) type as the
/// implicit first argument.
fn ty_fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> ty::FnSig<'tcx> {
    let sig = match ty.sty {
        ty::FnDef(..) |
        // Shims currently have type TyFnPtr. Not sure this should remain.
        ty::FnPtr(_) => ty.fn_sig(tcx),
        ty::Closure(def_id, substs) => {
            let sig = substs.closure_sig(def_id, tcx);

            // Prepend the closure environment type as the first argument.
            let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
            sig.map_bound(|sig| tcx.mk_fn_sig(
                iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                sig.output(),
                sig.variadic,
                sig.unsafety,
                sig.abi
            ))
        }
        ty::Generator(def_id, substs, _) => {
            let sig = substs.poly_sig(def_id, tcx);

            // A generator is resumed through a `&mut self` environment argument.
            let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
            let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

            sig.map_bound(|sig| {
                // The resume signature returns the `GeneratorState` lang item
                // instantiated with this generator's yield and return types.
                let state_did = tcx.lang_items().gen_state().unwrap();
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[
                    sig.yield_ty.into(),
                    sig.return_ty.into(),
                ]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                tcx.mk_fn_sig(iter::once(env_ty),
                    ret_ty,
                    false,
                    hir::Unsafety::Normal,
                    Abi::Rust
                )
            })
        }
        _ => bug!("unexpected type {:?} to ty_fn_sig", ty)
    };
    tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &sig)
}
175
176 pub fn get_function_name_and_sig<'a, 'tcx>(
177     tcx: TyCtxt<'a, 'tcx, 'tcx>,
178     inst: Instance<'tcx>,
179 ) -> (String, Signature) {
180     assert!(!inst.substs.needs_infer() && !inst.substs.has_param_types());
181     let fn_ty = inst.ty(tcx);
182     let sig = cton_sig_from_fn_ty(tcx, fn_ty);
183     (tcx.symbol_name(inst).as_str().to_string(), sig)
184 }
185
186 impl<'a, 'tcx: 'a, B: Backend + 'a> FunctionCx<'a, 'tcx, B> {
187     /// Instance must be monomorphized
188     pub fn get_function_id(&mut self, inst: Instance<'tcx>) -> FuncId {
189         let (name, sig) = get_function_name_and_sig(self.tcx, inst);
190         self.module
191             .declare_function(&name, Linkage::Import, &sig)
192             .unwrap()
193     }
194
195     /// Instance must be monomorphized
196     pub fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
197         let func_id = self.get_function_id(inst);
198         self.module
199             .declare_func_in_func(func_id, &mut self.bcx.func)
200     }
201
202     fn lib_call(
203         &mut self,
204         name: &str,
205         input_tys: Vec<types::Type>,
206         output_ty: Option<types::Type>,
207         args: &[Value],
208     ) -> Option<Value> {
209         let sig = Signature {
210             params: input_tys.iter().cloned().map(AbiParam::new).collect(),
211             returns: output_ty
212                 .map(|output_ty| vec![AbiParam::new(output_ty)])
213                 .unwrap_or(Vec::new()),
214             call_conv: CallConv::SystemV,
215         };
216         let func_id = self
217             .module
218             .declare_function(&name, Linkage::Import, &sig)
219             .unwrap();
220         let func_ref = self
221             .module
222             .declare_func_in_func(func_id, &mut self.bcx.func);
223         let call_inst = self.bcx.ins().call(func_ref, args);
224         if output_ty.is_none() {
225             return None;
226         }
227         let results = self.bcx.inst_results(call_inst);
228         assert_eq!(results.len(), 1);
229         Some(results[0])
230     }
231
232     pub fn easy_call(
233         &mut self,
234         name: &str,
235         args: &[CValue<'tcx>],
236         return_ty: Ty<'tcx>,
237     ) -> CValue<'tcx> {
238         let (input_tys, args): (Vec<_>, Vec<_>) = args
239             .into_iter()
240             .map(|arg| {
241                 (
242                     self.cton_type(arg.layout().ty).unwrap(),
243                     arg.load_value(self),
244                 )
245             }).unzip();
246         let return_layout = self.layout_of(return_ty);
247         let return_ty = if let ty::Tuple(tup) = return_ty.sty {
248             if !tup.is_empty() {
249                 bug!("easy_call( (...) -> <non empty tuple> ) is not allowed");
250             }
251             None
252         } else {
253             Some(self.cton_type(return_ty).unwrap())
254         };
255         if let Some(val) = self.lib_call(name, input_tys, return_ty, &args) {
256             CValue::ByVal(val, return_layout)
257         } else {
258             CValue::ByRef(
259                 self.bcx.ins().iconst(self.module.pointer_type(), 0),
260                 return_layout,
261             )
262         }
263     }
264
265     fn self_sig(&self) -> FnSig<'tcx> {
266         ty_fn_sig(self.tcx, self.instance.ty(self.tcx))
267     }
268
269     fn return_type(&self) -> Ty<'tcx> {
270         self.self_sig().output()
271     }
272 }
273
/// Emits the function prologue: appends EBB parameters for the incoming
/// arguments (plus a hidden return pointer for `ByRef` returns), and
/// populates `fx.local_map` with a `CPlace` for the return place, every
/// argument, and every MIR var/temp. Locals the SSA analysis allows stay
/// as Cranelift variables; everything else gets an explicit stack slot.
pub fn codegen_fn_prelude<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    start_ebb: Ebb,
) {
    let ssa_analyzed = crate::analyze::analyze(fx);

    let ret_layout = fx.layout_of(fx.return_type());
    let output_pass_mode = get_pass_mode(fx.tcx, fx.self_sig().abi, fx.return_type(), true);
    // For ByRef returns the caller passes a pointer as the first EBB param.
    let ret_param = match output_pass_mode {
        PassMode::NoPass => None,
        PassMode::ByVal(_) => None,
        PassMode::ByRef => Some(fx.bcx.append_ebb_param(start_ebb, fx.module.pointer_type())),
    };

    // How each MIR argument arrives: a single EBB param, or (for the
    // "rust-call" spread arg) one EBB param per tuple element.
    enum ArgKind {
        Normal(Value),
        Spread(Vec<Value>),
    }

    let func_params = fx
        .mir
        .args_iter()
        .map(|local| {
            let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty);

            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
            if Some(local) == fx.mir.spread_arg {
                // This argument (e.g. the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual function arguments.

                let tupled_arg_tys = match arg_ty.sty {
                    ty::Tuple(ref tys) => tys,
                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
                };

                // One EBB parameter per tuple element.
                let mut ebb_params = Vec::new();
                for arg_ty in tupled_arg_tys.iter() {
                    let cton_type =
                        get_pass_mode(fx.tcx, fx.self_sig().abi, arg_ty, false).get_param_ty(fx);
                    ebb_params.push(fx.bcx.append_ebb_param(start_ebb, cton_type));
                }

                (local, ArgKind::Spread(ebb_params), arg_ty)
            } else {
                let cton_type =
                    get_pass_mode(fx.tcx, fx.self_sig().abi, arg_ty, false).get_param_ty(fx);
                (
                    local,
                    ArgKind::Normal(fx.bcx.append_ebb_param(start_ebb, cton_type)),
                    arg_ty,
                )
            }
        }).collect::<Vec<(Local, ArgKind, Ty)>>();

    fx.bcx.switch_to_block(start_ebb);

    // Anchor instruction for attaching global comments (debugging aid).
    fx.top_nop = Some(fx.bcx.ins().nop());
    fx.add_global_comment(format!("ssa {:?}", ssa_analyzed));

    // Map RETURN_PLACE according to the output pass mode.
    match output_pass_mode {
        PassMode::NoPass => {
            // Nothing is returned; a null address stands in so writes to the
            // return place still have somewhere to resolve.
            let null = fx.bcx.ins().iconst(fx.module.pointer_type(), 0);
            //unimplemented!("pass mode nopass");
            fx.local_map.insert(
                RETURN_PLACE,
                CPlace::Addr(null, None, fx.layout_of(fx.return_type())),
            );
        }
        PassMode::ByVal(ret_ty) => {
            fx.bcx.declare_var(mir_var(RETURN_PLACE), ret_ty);
            fx.local_map
                .insert(RETURN_PLACE, CPlace::Var(RETURN_PLACE, ret_layout));
        }
        PassMode::ByRef => {
            // The caller-provided return pointer appended above.
            fx.local_map.insert(
                RETURN_PLACE,
                CPlace::Addr(ret_param.unwrap(), None, ret_layout),
            );
        }
    }

    for (local, arg_kind, ty) in func_params {
        let layout = fx.layout_of(ty);

        // Fast path: SSA-eligible scalar arguments become Cranelift variables
        // instead of stack slots.
        if let ArgKind::Normal(ebb_param) = arg_kind {
            if !ssa_analyzed
                .get(&local)
                .unwrap()
                .contains(crate::analyze::Flags::NOT_SSA)
            {
                fx.bcx
                    .declare_var(mir_var(local), fx.cton_type(ty).unwrap());
                match get_pass_mode(fx.tcx, fx.self_sig().abi, ty, false) {
                    PassMode::NoPass => unimplemented!("pass mode nopass"),
                    PassMode::ByVal(_) => fx.bcx.def_var(mir_var(local), ebb_param),
                    PassMode::ByRef => {
                        // Argument arrived as a pointer: load the value out.
                        let val = CValue::ByRef(ebb_param, fx.layout_of(ty)).load_value(fx);
                        fx.bcx.def_var(mir_var(local), val);
                    }
                }
                fx.local_map.insert(local, CPlace::Var(local, layout));
                continue;
            }
        }

        // Slow path: spill the argument into an explicit stack slot.
        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            size: layout.size.bytes() as u32,
            offset: None,
        });

        let place = CPlace::from_stack_slot(fx, stack_slot, ty);

        match arg_kind {
            ArgKind::Normal(ebb_param) => match get_pass_mode(fx.tcx, fx.self_sig().abi, ty, false)
            {
                PassMode::NoPass => unimplemented!("pass mode nopass"),
                PassMode::ByVal(_) => {
                    place.write_cvalue(fx, CValue::ByVal(ebb_param, place.layout()))
                }
                PassMode::ByRef => place.write_cvalue(fx, CValue::ByRef(ebb_param, place.layout())),
            },
            ArgKind::Spread(ebb_params) => {
                // Reassemble the spread tuple field by field.
                for (i, ebb_param) in ebb_params.into_iter().enumerate() {
                    let sub_place = place.place_field(fx, mir::Field::new(i));
                    match get_pass_mode(fx.tcx, fx.self_sig().abi, sub_place.layout().ty, false) {
                        PassMode::NoPass => unimplemented!("pass mode nopass"),
                        PassMode::ByVal(_) => {
                            sub_place.write_cvalue(fx, CValue::ByVal(ebb_param, sub_place.layout()))
                        }
                        PassMode::ByRef => {
                            sub_place.write_cvalue(fx, CValue::ByRef(ebb_param, sub_place.layout()))
                        }
                    }
                }
            }
        }
        fx.local_map.insert(local, place);
    }

    // Allocate places for all remaining MIR locals (vars and temps).
    for local in fx.mir.vars_and_temps_iter() {
        let ty = fx.mir.local_decls[local].ty;
        let layout = fx.layout_of(ty);

        let place = if ssa_analyzed
            .get(&local)
            .unwrap()
            .contains(crate::analyze::Flags::NOT_SSA)
        {
            let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                kind: StackSlotKind::ExplicitSlot,
                size: layout.size.bytes() as u32,
                offset: None,
            });
            CPlace::from_stack_slot(fx, stack_slot, ty)
        } else {
            fx.bcx
                .declare_var(mir_var(local), fx.cton_type(ty).unwrap());
            CPlace::Var(local, layout)
        };

        fx.local_map.insert(local, place);
    }

    // Jump into the EBB corresponding to MIR's START_BLOCK.
    fx.bcx
        .ins()
        .jump(*fx.ebb_map.get(&START_BLOCK).unwrap(), &[]);
}
444
445 pub fn codegen_terminator_call<'a, 'tcx: 'a>(
446     fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
447     func: &Operand<'tcx>,
448     args: &[Operand<'tcx>],
449     destination: &Option<(Place<'tcx>, BasicBlock)>,
450 ) {
451     let fn_ty = fx.monomorphize(&func.ty(&fx.mir.local_decls, fx.tcx));
452     let sig = ty_fn_sig(fx.tcx, fn_ty);
453
454     // Unpack arguments tuple for closures
455     let args = if sig.abi == Abi::RustCall {
456         assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
457         let self_arg = trans_operand(fx, &args[0]);
458         let pack_arg = trans_operand(fx, &args[1]);
459         let mut args = Vec::new();
460         args.push(self_arg);
461         match pack_arg.layout().ty.sty {
462             ty::Tuple(ref tupled_arguments) => {
463                 for (i, _) in tupled_arguments.iter().enumerate() {
464                     args.push(pack_arg.value_field(fx, mir::Field::new(i)));
465                 }
466             }
467             _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
468         }
469         args
470     } else {
471         args.into_iter()
472             .map(|arg| trans_operand(fx, arg))
473             .collect::<Vec<_>>()
474     };
475
476     let destination = destination
477         .as_ref()
478         .map(|&(ref place, bb)| (trans_place(fx, place), bb));
479
480     if !codegen_intrinsic_call(fx, fn_ty, &args, destination) {
481         codegen_call_inner(
482             fx,
483             Some(func),
484             fn_ty,
485             args,
486             destination.map(|(place, _)| place),
487         );
488
489         if let Some((_, dest)) = destination {
490             let ret_ebb = fx.get_ebb(dest);
491             fx.bcx.ins().jump(ret_ebb, &[]);
492         } else {
493             fx.bcx.ins().trap(TrapCode::User(!0));
494         }
495     }
496 }
497
/// Emits the actual call instruction for a non-intrinsic call.
///
/// Handles direct calls (`FnDef` resolved to an `Instance`), virtual calls
/// (method pointer loaded from the vtable), and indirect calls through a
/// function pointer operand. For `ByRef` returns a pointer to `ret_place`
/// (or a null pointer if the result is discarded) is prepended to the
/// argument list; for `ByVal` returns the single result is written back
/// into `ret_place` afterwards.
pub fn codegen_call_inner<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    func: Option<&Operand<'tcx>>,
    fn_ty: Ty<'tcx>,
    args: Vec<CValue<'tcx>>,
    ret_place: Option<CPlace<'tcx>>,
) {
    let sig = ty_fn_sig(fx.tcx, fn_ty);

    let ret_layout = fx.layout_of(sig.output());

    let output_pass_mode = get_pass_mode(fx.tcx, sig.abi, sig.output(), true);
    let return_ptr = match output_pass_mode {
        PassMode::NoPass => None,
        PassMode::ByRef => match ret_place {
            Some(ret_place) => Some(ret_place.expect_addr()),
            // Result ignored by the caller: pass a null pointer as the
            // destination.
            None => Some(fx.bcx.ins().iconst(fx.module.pointer_type(), 0)),
        },
        PassMode::ByVal(_) => None,
    };

    // Resolve FnDef types to a concrete monomorphized instance.
    let instance = match fn_ty.sty {
        ty::FnDef(def_id, substs) => {
            Some(Instance::resolve(fx.tcx, ParamEnv::reveal_all(), def_id, substs).unwrap())
        }
        _ => None,
    };

    let func_ref: Option<Value>; // Indirect call target

    // For virtual calls the self pointer and method pointer both come from
    // the vtable lookup; otherwise the first arg is adjusted like the rest.
    let first_arg = {
        if let Some(Instance {
            def: InstanceDef::Virtual(_, idx),
            ..
        }) = instance
        {
            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
            func_ref = Some(method);
            Some(ptr)
        } else {
            func_ref = if instance.is_none() {
                // Plain function pointer: load the callee address now.
                let func = trans_operand(fx, func.expect("indirect call without func Operand"));
                Some(func.load_value(fx))
            } else {
                None
            };

            args.get(0).map(|arg| adjust_arg_for_abi(fx, sig, *arg))
        }.into_iter()
    };

    // Final argument order: [return_ptr?] [first_arg?] rest...
    let call_args: Vec<Value> = return_ptr
        .into_iter()
        .chain(first_arg)
        .chain(
            args.into_iter()
                .skip(1)
                .map(|arg| adjust_arg_for_abi(fx, sig, arg)),
        ).collect::<Vec<_>>();

    let sig = fx.bcx.import_signature(cton_sig_from_fn_ty(fx.tcx, fn_ty));
    let call_inst = if let Some(func_ref) = func_ref {
        fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
    } else {
        let func_ref = fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
        fx.bcx.ins().call(func_ref, &call_args)
    };

    match output_pass_mode {
        PassMode::NoPass => {}
        PassMode::ByVal(_) => {
            // Store the single by-value result into the destination place.
            if let Some(ret_place) = ret_place {
                let results = fx.bcx.inst_results(call_inst);
                ret_place.write_cvalue(fx, CValue::ByVal(results[0], ret_layout));
            }
        }
        // ByRef: the callee wrote through return_ptr; nothing left to do.
        PassMode::ByRef => {}
    }
}
577
578 pub fn codegen_return(fx: &mut FunctionCx<impl Backend>) {
579     match get_pass_mode(fx.tcx, fx.self_sig().abi, fx.return_type(), true) {
580         PassMode::NoPass | PassMode::ByRef => {
581             fx.bcx.ins().return_(&[]);
582         }
583         PassMode::ByVal(_) => {
584             let place = fx.get_local_place(RETURN_PLACE);
585             let ret_val = place.to_cvalue(fx).load_value(fx);
586             fx.bcx.ins().return_(&[ret_val]);
587         }
588     }
589 }
590
591 fn codegen_intrinsic_call<'a, 'tcx: 'a>(
592     fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
593     fn_ty: Ty<'tcx>,
594     args: &[CValue<'tcx>],
595     destination: Option<(CPlace<'tcx>, BasicBlock)>,
596 ) -> bool {
597     if let ty::FnDef(def_id, substs) = fn_ty.sty {
598         let sig = ty_fn_sig(fx.tcx, fn_ty);
599
600         if sig.abi == Abi::RustIntrinsic {
601             let intrinsic = fx.tcx.item_name(def_id).as_str();
602             let intrinsic = &intrinsic[..];
603
604             let ret = match destination {
605                 Some((place, _)) => place,
606                 None => {
607                     // Insert non returning intrinsics here
608                     match intrinsic {
609                         "abort" => {
610                             fx.bcx.ins().trap(TrapCode::User(!0 - 1));
611                         }
612                         "unreachable" => {
613                             fx.bcx.ins().trap(TrapCode::User(!0 - 1));
614                         }
615                         _ => unimplemented!("unsupported instrinsic {}", intrinsic),
616                     }
617                     return true;
618                 }
619             };
620
621             let nil_ty = fx.tcx.mk_unit();
622             let u64_layout = fx.layout_of(fx.tcx.types.u64);
623             let usize_layout = fx.layout_of(fx.tcx.types.usize);
624
625             match intrinsic {
626                 "assume" => {
627                     assert_eq!(args.len(), 1);
628                 }
629                 "arith_offset" => {
630                     assert_eq!(args.len(), 2);
631                     let base = args[0].load_value(fx);
632                     let offset = args[1].load_value(fx);
633                     let res = fx.bcx.ins().iadd(base, offset);
634                     let res = CValue::ByVal(res, ret.layout());
635                     ret.write_cvalue(fx, res);
636                 }
637                 "likely" | "unlikely" => {
638                     assert_eq!(args.len(), 1);
639                     ret.write_cvalue(fx, args[0]);
640                 }
641                 "copy" | "copy_nonoverlapping" => {
642                     let elem_ty = substs.type_at(0);
643                     let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
644                     let elem_size = fx
645                         .bcx
646                         .ins()
647                         .iconst(fx.module.pointer_type(), elem_size as i64);
648                     assert_eq!(args.len(), 3);
649                     let src = args[0].load_value(fx);
650                     let dst = args[1].load_value(fx);
651                     let count = args[2].load_value(fx);
652                     let byte_amount = fx.bcx.ins().imul(count, elem_size);
653
654                     if intrinsic.ends_with("_nonoverlapping") {
655                         fx.bcx.call_memcpy(fx.isa, dst, src, byte_amount);
656                     } else {
657                         fx.bcx.call_memmove(fx.isa, dst, src, byte_amount);
658                     }
659                 }
660                 "discriminant_value" => {
661                     assert_eq!(args.len(), 1);
662                     let discr = crate::base::trans_get_discriminant(fx, args[0], ret.layout());
663                     ret.write_cvalue(fx, discr);
664                 }
665                 "size_of" => {
666                     assert_eq!(args.len(), 0);
667                     let size_of = fx.layout_of(substs.type_at(0)).size.bytes();
668                     let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
669                     ret.write_cvalue(fx, size_of);
670                 }
671                 "size_of_val" => {
672                     assert_eq!(args.len(), 1);
673                     let layout = fx.layout_of(substs.type_at(0));
674                     let size = match &layout.ty.sty {
675                         _ if !layout.is_unsized() => fx
676                             .bcx
677                             .ins()
678                             .iconst(fx.module.pointer_type(), layout.size.bytes() as i64),
679                         ty::Slice(elem) => {
680                             let len = args[0].load_value_pair(fx).1;
681                             let elem_size = fx.layout_of(elem).size.bytes();
682                             fx.bcx.ins().imul_imm(len, elem_size as i64)
683                         }
684                         ty::Dynamic(..) => crate::vtable::size_of_obj(fx, args[0]),
685                         ty => bug!("size_of_val for unknown unsized type {:?}", ty),
686                     };
687                     ret.write_cvalue(fx, CValue::ByVal(size, usize_layout));
688                 }
689                 "min_align_of" => {
690                     assert_eq!(args.len(), 0);
691                     let min_align = fx.layout_of(substs.type_at(0)).align.abi();
692                     let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
693                     ret.write_cvalue(fx, min_align);
694                 }
695                 "min_align_of_val" => {
696                     assert_eq!(args.len(), 1);
697                     let layout = fx.layout_of(substs.type_at(0));
698                     let align = match &layout.ty.sty {
699                         _ if !layout.is_unsized() => fx
700                             .bcx
701                             .ins()
702                             .iconst(fx.module.pointer_type(), layout.align.abi() as i64),
703                         ty::Slice(elem) => {
704                             let align = fx.layout_of(elem).align.abi() as i64;
705                             fx.bcx.ins().iconst(fx.module.pointer_type(), align)
706                         }
707                         ty::Dynamic(..) => crate::vtable::min_align_of_obj(fx, args[0]),
708                         ty => unimplemented!("min_align_of_val for {:?}", ty),
709                     };
710                     ret.write_cvalue(fx, CValue::ByVal(align, usize_layout));
711                 }
712                 "type_id" => {
713                     assert_eq!(args.len(), 0);
714                     let type_id = fx.tcx.type_id_hash(substs.type_at(0));
715                     let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
716                     ret.write_cvalue(fx, type_id);
717                 }
718                 _ if intrinsic.starts_with("unchecked_") => {
719                     assert_eq!(args.len(), 2);
720                     let bin_op = match intrinsic {
721                         "unchecked_div" => BinOp::Div,
722                         "unchecked_rem" => BinOp::Rem,
723                         "unchecked_shl" => BinOp::Shl,
724                         "unchecked_shr" => BinOp::Shr,
725                         _ => unimplemented!("intrinsic {}", intrinsic),
726                     };
727                     let res = match ret.layout().ty.sty {
728                         ty::Uint(_) => crate::base::trans_int_binop(
729                             fx,
730                             bin_op,
731                             args[0],
732                             args[1],
733                             ret.layout().ty,
734                             false,
735                         ),
736                         ty::Int(_) => crate::base::trans_int_binop(
737                             fx,
738                             bin_op,
739                             args[0],
740                             args[1],
741                             ret.layout().ty,
742                             true,
743                         ),
744                         _ => panic!(),
745                     };
746                     ret.write_cvalue(fx, res);
747                 }
748                 _ if intrinsic.ends_with("_with_overflow") => {
749                     assert_eq!(args.len(), 2);
750                     assert_eq!(args[0].layout().ty, args[1].layout().ty);
751                     let bin_op = match intrinsic {
752                         "add_with_overflow" => BinOp::Add,
753                         "sub_with_overflow" => BinOp::Sub,
754                         "mul_with_overflow" => BinOp::Mul,
755                         _ => unimplemented!("intrinsic {}", intrinsic),
756                     };
757                     let res = match args[0].layout().ty.sty {
758                         ty::Uint(_) => crate::base::trans_checked_int_binop(
759                             fx,
760                             bin_op,
761                             args[0],
762                             args[1],
763                             ret.layout().ty,
764                             false,
765                         ),
766                         ty::Int(_) => crate::base::trans_checked_int_binop(
767                             fx,
768                             bin_op,
769                             args[0],
770                             args[1],
771                             ret.layout().ty,
772                             true,
773                         ),
774                         _ => panic!(),
775                     };
776                     ret.write_cvalue(fx, res);
777                 }
                // `overflowing_{add,sub,mul}`: plain integer arithmetic with no
                // overflow flag in the result (the trailing bool again selects
                // signed vs unsigned lowering).
                // NOTE(review): presumably `trans_int_binop` implements wrapping
                // (two's-complement) semantics as these intrinsics require —
                // confirm in base.rs.
                _ if intrinsic.starts_with("overflowing_") => {
                    assert_eq!(args.len(), 2);
                    // Both operands share one type; signedness is derived from it.
                    assert_eq!(args[0].layout().ty, args[1].layout().ty);
                    let bin_op = match intrinsic {
                        "overflowing_add" => BinOp::Add,
                        "overflowing_sub" => BinOp::Sub,
                        "overflowing_mul" => BinOp::Mul,
                        _ => unimplemented!("intrinsic {}", intrinsic),
                    };
                    let res = match args[0].layout().ty.sty {
                        // Unsigned operands -> unsigned lowering.
                        ty::Uint(_) => crate::base::trans_int_binop(
                            fx,
                            bin_op,
                            args[0],
                            args[1],
                            ret.layout().ty,
                            false,
                        ),
                        // Signed operands -> signed lowering.
                        ty::Int(_) => crate::base::trans_int_binop(
                            fx,
                            bin_op,
                            args[0],
                            args[1],
                            ret.layout().ty,
                            true,
                        ),
                        // Only integer types reach these intrinsics.
                        _ => panic!(),
                    };
                    ret.write_cvalue(fx, res);
                }
808                 "offset" => {
809                     assert_eq!(args.len(), 2);
810                     let base = args[0].load_value(fx);
811                     let offset = args[1].load_value(fx);
812                     let res = fx.bcx.ins().iadd(base, offset);
813                     ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
814                 }
                // `transmute`: spill the source value to the stack, then read
                // the same bytes back through a `ByRef` that carries the
                // destination type's layout.
                "transmute" => {
                    assert_eq!(args.len(), 1);
                    let src_ty = substs.type_at(0);
                    let dst_ty = substs.type_at(1);
                    assert_eq!(args[0].layout().ty, src_ty);
                    let addr = args[0].force_stack(fx);
                    let dst_layout = fx.layout_of(dst_ty);
                    // NOTE(review): nothing here asserts that src and dst
                    // layouts have equal size — presumably rustc rejected
                    // mismatched transmutes earlier; confirm, or add a size
                    // assertion for defense in depth.
                    ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
                }
                // `uninit::<T>()`: allocate a fresh stack slot sized from
                // `T`'s layout and hand its (uninitialized, garbage) contents
                // back as the result value.
                "uninit" => {
                    assert_eq!(args.len(), 0);
                    let ty = substs.type_at(0);
                    let layout = fx.layout_of(ty);
                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // Slot size comes straight from the type's layout.
                        size: layout.size.bytes() as u32,
                        offset: None,
                    });

                    let uninit_place = CPlace::from_stack_slot(fx, stack_slot, ty);
                    let uninit_val = uninit_place.to_cvalue(fx);
                    ret.write_cvalue(fx, uninit_val);
                }
                // `ctlz`/`ctlz_nonzero`: count leading zeros via Cranelift's
                // `clz` instruction; both variants share one lowering.
                "ctlz" | "ctlz_nonzero" => {
                    assert_eq!(args.len(), 1);
                    let arg = args[0].load_value(fx);
                    let res = CValue::ByVal(fx.bcx.ins().clz(arg), args[0].layout());
                    ret.write_cvalue(fx, res);
                }
844                 "cttz" | "cttz_nonzero" => {
845                     assert_eq!(args.len(), 1);
846                     let arg = args[0].load_value(fx);
847                     let res = CValue::ByVal(fx.bcx.ins().clz(arg), args[0].layout());
848                     ret.write_cvalue(fx, res);
849                 }
                // `ctpop`: population count (number of set bits) via
                // Cranelift's `popcnt` instruction.
                "ctpop" => {
                    assert_eq!(args.len(), 1);
                    let arg = args[0].load_value(fx);
                    let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), args[0].layout());
                    ret.write_cvalue(fx, res);
                }
                // `needs_drop::<T>()`: answered entirely at compile time by
                // querying `Ty::needs_drop` and emitting the result as a bool
                // constant.
                "needs_drop" => {
                    assert_eq!(args.len(), 0);
                    let ty = substs.type_at(0);
                    let needs_drop = if ty.needs_drop(fx.tcx, ParamEnv::reveal_all()) {
                        1
                    } else {
                        0
                    };
                    let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
                    ret.write_cvalue(fx, needs_drop);
                }
                // Atomic fences are lowered to nothing at all.
                // NOTE(review): presumably acceptable only under a
                // single-threaded execution model — confirm this backend's
                // memory-model assumptions.
                _ if intrinsic.starts_with("atomic_fence") => {}
                // `atomic_load`: the sole argument is a pointer; re-wrap it as
                // a `ByRef` of the pointee's layout so `write_cvalue` performs
                // a plain (non-atomic) load from it.
                _ if intrinsic.starts_with("atomic_load") => {
                    assert_eq!(args.len(), 1);
                    let inner_layout =
                        fx.layout_of(args[0].layout().ty.builtin_deref(true).unwrap().ty);
                    let val = CValue::ByRef(args[0].load_value(fx), inner_layout);
                    ret.write_cvalue(fx, val);
                }
                // `atomic_store`: treat the pointer operand as an ordinary
                // place and perform a plain (non-atomic) store of the value
                // operand into it.
                _ if intrinsic.starts_with("atomic_store") => {
                    assert_eq!(args.len(), 2);
                    let dest = CPlace::Addr(args[0].load_value(fx), None, args[1].layout());
                    dest.write_cvalue(fx, args[1]);
                }
                // `atomic_xadd`: fetch-add — returns the value that was at
                // `*ptr` before adding `amount` to it.
                // NOTE(review): lowered as separate load / iadd / store, which
                // is NOT actually atomic; racing threads could lose updates.
                // Confirm this backend only targets single-threaded execution.
                _ if intrinsic.starts_with("atomic_xadd") => {
                    assert_eq!(args.len(), 2);
                    let clif_ty = fx.cton_type(substs.type_at(0)).unwrap();
                    let ptr = args[0].load_value(fx);
                    let amount = args[1].load_value(fx);
                    let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
                    let new = fx.bcx.ins().iadd(old, amount);
                    fx.bcx.ins().store(MemFlags::new(), ptr, new, 0);
                    // The intrinsic's result is the pre-update value.
                    ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(substs.type_at(0))));
                }
                // `atomic_xsub`: fetch-sub — returns the value that was at
                // `*ptr` before subtracting `amount` from it.
                // NOTE(review): lowered as separate load / isub / store, which
                // is NOT actually atomic; racing threads could lose updates.
                // Confirm this backend only targets single-threaded execution.
                _ if intrinsic.starts_with("atomic_xsub") => {
                    assert_eq!(args.len(), 2);
                    let clif_ty = fx.cton_type(substs.type_at(0)).unwrap();
                    let ptr = args[0].load_value(fx);
                    let amount = args[1].load_value(fx);
                    let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
                    let new = fx.bcx.ins().isub(old, amount);
                    fx.bcx.ins().store(MemFlags::new(), ptr, new, 0);
                    // The intrinsic's result is the pre-update value.
                    ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(substs.type_at(0))));
                }
                // Unrecognized intrinsics are a hard error at codegen time.
                _ => unimpl!("unsupported intrinsic {}", intrinsic),
            }

            // Every handled intrinsic falls through to here: jump to the
            // caller-supplied destination block, or — when there is no
            // destination, i.e. the call diverges — emit a trap that must
            // never be reached.
            if let Some((_, dest)) = destination {
                let ret_ebb = fx.get_ebb(dest);
                fx.bcx.ins().jump(ret_ebb, &[]);
            } else {
                fx.bcx.ins().trap(TrapCode::User(!0));
            }
            // `true`: the call was recognized and fully lowered as an intrinsic.
            return true;
        }
    }

    // Not an intrinsic (or no match above): let the normal call path handle it.
    false
}