//! Codegen of `asm!` invocations.
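//!
//! Cranelift has no native support for inline assembly, so each `asm!` block is
//! lowered to a standalone wrapper function emitted as global assembly, which
//! the generated CLIF then calls. A representative invocation handled by this
//! module (illustrative only, assuming x86-64):
//!
//! ```ignore
//! let mut x: u64 = 3;
//! unsafe { std::arch::asm!("add {0}, 1", inout(reg) x) };
//! ```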

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    destination: Option<mir::BasicBlock>,
) {
    // FIXME add .eh_frame unwind info directives

    if !template.is_empty() {
        // Used by panic_abort
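        // (`int $$0x29` is the Windows fast-fail interrupt; a plain trap suffices here.)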
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
            fx.bcx.ins().trap(TrapCode::User(1));
            return;
        }

        // Used by stdarch
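        // Recognizes the rbx-preserving cpuid template emitted by stdarch
        // (illustrative rendering; `{0:r}` is the operand placeholder):
        //   movq %rbx, {0:r}
        //   cpuid
        //   xchgq %rbx, {0:r}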
        if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
            && matches!(
                template[6],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
        {
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: true,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out {
                    reg:
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg,
                        )),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    late: true,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }

        // Used by compiler-builtins
        if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
            return;
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
            return;
        }

        // Used by measureme
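        // The first pattern is measureme's serializing cpuid fence; no value is
        // read from it, so it is lowered to a plain jump to the destination.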
        if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
            && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[8],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
            // Return zero dummy values for all performance counters
            match operands[0] {
                InlineAsmOperand::In {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    value: _,
                } => {}
                _ => unreachable!(),
            };
            let lo = match operands[1] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let hi = match operands[2] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let u32_layout = fx.layout_of(fx.tcx.types.u32);
            let zero = fx.bcx.ins().iconst(types::I32, 0);
            lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
            hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));

            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
            )
            && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
            )
            && template[4] == InlineAsmTemplatePiece::String(")".to_string())
        {
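            // This pattern is matched only to skip it: no values are written,
            // so it lowers to a plain jump with no side effects.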
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }
    }

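    // The general case: wrap the template in a standalone function emitted as
    // global asm and call it, passing inputs and outputs through a scratch
    // stack slot.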
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);

    match destination {
        Some(destination) => {
            let destination_block = fx.get_block(destination);
            fx.bcx.ins().jump(destination_block, &[]);
        }
        None => {
            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
        }
    }
}

struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

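        // Bump-allocate the scratch area: each slot is sized and aligned to the
        // largest value type the register class can hold.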
        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register that the C calling convention
            // already clobbers, we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset slot size to before input so that input and output operands can overlap
        // and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

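    /// Emit the wrapper function as global assembly. On x86-64 the result looks
    /// roughly like this (illustrative sketch, not the exact output):
    ///
    /// ```asm
    /// __inline_asm_<cgu>_n0:
    ///     push rbp
    ///     mov rbp, rdi          ; rdi = pointer to the scratch slot
    ///     ; save clobbers, load inputs, user template, store outputs,
    ///     ; restore clobbers
    ///     pop rbp
    ///     ret
    /// ```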
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

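    /// The wrapper receives the scratch slot pointer as its only argument and
    /// stashes it in a callee-saved frame register (ebp/rbp on x86, s0 on
    /// RISC-V) so the save/restore code can address the slot throughout.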
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

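/// Call the generated asm wrapper, passing a pointer to a freshly allocated
/// scratch stack slot. Inputs are stored into the slot before the call and
/// outputs are loaded from it afterwards.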
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}