//! Codegen of `asm!` invocations.
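//!
//! Cranelift has no native support for inline assembly, so the generic lowering path
//! below turns each `asm!` block into a standalone wrapper function that is emitted as
//! global asm and called with a pointer to a stack scratch slot holding the inputs,
//! outputs and saved clobbered registers. A handful of well-known templates used by
//! std and its dependencies are instead pattern-matched and lowered directly to clif
//! instructions. An invocation like the following (illustrative example, not from this
//! file) takes the generic wrapper path:
//!
//! ```ignore
//! let x: u64;
//! unsafe { std::arch::asm!("mov {}, 42", out(reg) x) };
//! ```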

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    destination: Option<mir::BasicBlock>,
) {
    // FIXME add .eh_frame unwind info directives

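    // Pattern-match a few known asm templates used by std and its dependencies and
    // lower them directly to clif instructions, bypassing the global asm wrapper path
    // further below.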
    if !template.is_empty() {
        // Used by panic_abort
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
            fx.bcx.ins().trap(TrapCode::User(1));
            return;
        }

        // Used by stdarch
        if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
            && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
            && matches!(
                template[7],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
        {
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: _,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out {
                    reg:
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg,
                        )),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    late: _,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }

        // Used by compiler-builtins
        if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
            return;
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
            return;
        }

        // Used by measureme
        if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
            && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[8],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
            // Return zero dummy values for all performance counters
            match operands[0] {
                InlineAsmOperand::In {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    value: _,
                } => {}
                _ => unreachable!(),
            };
            let lo = match operands[1] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let hi = match operands[2] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let u32_layout = fx.layout_of(fx.tcx.types.u32);
            let zero = fx.bcx.ins().iconst(types::I32, 0);
            lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
            hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));

            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
            )
            && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
            )
            && template[4] == InlineAsmTemplatePiece::String(")".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }
    }

    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);

    match destination {
        Some(destination) => {
            let destination_block = fx.get_block(destination);
            fx.bcx.ins().jump(destination_block, &[]);
        }
        None => {
            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
        }
    }
}

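/// Per-invocation state for lowering an `asm!` block through the global asm wrapper
/// path: the register chosen for each operand and the offsets of the stack slots used
/// to pass clobbered registers, inputs and outputs between the caller and the wrapper.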
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
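    /// Choose a concrete register for every operand in three passes: explicitly named
    /// registers first, then register classes for out/inout operands (which are more
    /// constrained), and finally register classes for in/lateout operands. An input may
    /// share a register with a lateout, since the register is read before it is written.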
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

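    /// Lay out the scratch slot passed to the wrapper: save slots for registers that
    /// the asm uses but that a C call would not already clobber, then slots shared by
    /// inout operands, and finally the input and output regions, which may overlap
    /// because the wrapper reads all inputs before it writes any output back.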
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by a function call,
            // then we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset the slot size to its value from before the input slots were allocated,
        // so that input and output operands can overlap and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

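    /// Render the wrapper function for this `asm!` block as global asm. For x86_64
    /// with one input, one output and one saved clobber, the output looks roughly like
    /// the sketch below (illustrative only; the name, registers and offsets depend on
    /// the allocations above):
    ///
    /// ```asm
    /// .globl __inline_asm_example_n0
    /// .type __inline_asm_example_n0,@function
    /// .section .text.__inline_asm_example_n0,"ax",@progbits
    /// __inline_asm_example_n0:
    /// .intel_syntax noprefix
    ///     push rbp
    ///     mov rbp,rdi              # rdi points to the scratch slot
    ///     mov [rbp+0x0], rbx       # save a clobbered register
    ///     mov rax, [rbp+0x8]       # load an input
    ///     <asm template>
    ///     mov [rbp+0x10], rax      # store an output
    ///     mov rbx, [rbp+0x0]       # restore the clobbered register
    ///     pop rbp
    ///     ret
    /// .att_syntax
    /// .size __inline_asm_example_n0, .-__inline_asm_example_n0
    /// .text
    /// ```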
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

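    /// Wrapper prologue: save the base/frame register (and the return address where
    /// needed), then move the scratch slot pointer, which is the first and only C ABI
    /// argument, into the base register used by `save_register`/`restore_register`.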
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

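    /// For `noreturn` asm blocks there is nothing to restore; emit a trap instruction
    /// instead of a return so that control never falls out of the wrapper.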
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

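    /// Store `reg` into the scratch slot at `offset`, relative to the base register set
    /// up in the prologue. Used both for saving clobbered registers and for writing asm
    /// outputs back to their slots.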
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

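    /// Load `reg` from the scratch slot at `offset`; the inverse of `save_register`.
    /// Used both for loading asm inputs and for restoring clobbered registers.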
    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

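/// Emit the call to the generated wrapper: create the scratch slot, spill all inputs to
/// their offsets, pass the slot's address as the single System V argument (matching the
/// hand-written prologue), and read all outputs back out after the call.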
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}