//! Codegen of `asm!` invocations.

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

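    // A few well-known asm templates are pattern-matched and lowered directly, as
    // Cranelift has no native inline asm support. The first is presumably Windows'
    // fail-fast sequence (`int 0x29`); the second matches the rbx-preserving `cpuid`
    // sequence used by `__cpuid_count` in stdarch.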
    if !template.is_empty() {
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
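            // A constant-true `trapnz` is used instead of `trap`, presumably because
            // `trap` is a block terminator while codegen continues in this block.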
            let true_ = fx.bcx.ins().iconst(types::I32, 1);
            fx.bcx.ins().trapnz(true_, TrapCode::User(1));
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
            && matches!(
                template[6],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
        {
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                    assert_eq!(
                        reg,
                        InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
                    );
                    (
                        crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                        crate::base::codegen_place(fx, out_place.unwrap()),
                    )
                }
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out { reg, late: true, place } => {
                    assert_eq!(
                        reg,
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg
                        ))
                    );
                    crate::base::codegen_place(fx, place.unwrap())
                }
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                    assert_eq!(
                        reg,
                        InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
                    );
                    (
                        crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                        crate::base::codegen_place(fx, out_place.unwrap()),
                    )
                }
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out { reg, late: true, place } => {
                    assert_eq!(
                        reg,
                        InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
                    );
                    crate::base::codegen_place(fx, place.unwrap())
                }
                _ => unreachable!(),
            };

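            // Replace the matched `cpuid` sequence with a call to the cpuid helper.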
            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            return;
        } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
        }
    }

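    // General case: emit the asm block as a separate global asm function which
    // communicates with the caller through a stack slot, then call it.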
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
}

struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
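    // Allocate a register for each operand: explicit registers are reserved first,
    // then register classes are filled greedily, with the more constrained
    // out/inout/inlateout operands allocated before in/lateout ones.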
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

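    // Lay out one stack slot per live register: first slots for saving clobbered
    // registers, then slots for inout operands (shared between input and output),
    // and finally separate input and output slots, which are allowed to overlap
    // since inputs are read into registers before any output is written back.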
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by a function call,
            // then we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset the slot size to its value from before the input slots were allocated,
        // so that input and output operands can overlap and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

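    // Wrap the user asm in a function which takes a pointer to the scratch slot,
    // saves clobbered registers, loads the inputs from the slot, runs the asm, and
    // stores the outputs back before restoring the clobbers.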
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

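    // The wrapper receives the scratch slot address as its only argument (on the
    // stack for x86, in rdi/a0 otherwise) and keeps it in ebp/rbp/s0 while the asm
    // runs; `epilogue` undoes this frame setup.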
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

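    // With `noreturn` the asm must not fall through, so emit a trap instruction
    // instead of restoring registers and returning.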
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

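    // Spill `reg` to the stack slot at `offset` from the frame base set up by the
    // prologue; `restore_register` below is the mirror image.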
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

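// Emit the call to the generated asm wrapper: spill all inputs into a fresh stack
// slot, pass its address to the wrapper (declared as an imported SystemV function)
// and read the outputs back out of the slot afterwards.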
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}