//! Codegen of `asm!` invocations.
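//!
//! Cranelift has no support for inline assembly, so each `asm!` block is
//! instead lowered to a free-standing function emitted as global assembly:
//! the inputs are spilled into a stack scratch slot, a pointer to that slot
//! is passed to the generated function, and the outputs are read back from
//! the same slot after the call returns.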

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::Symbol;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

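    // A handful of asm templates are pattern-matched and lowered directly
    // instead of going through the generic global-asm path below: the
    // `int 0x29` sequence (presumably Windows' `__fastfail`) becomes a trap,
    // and the rbx-preserving `cpuid` sequence is forwarded to the cpuid
    // intrinsic helper.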
    if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
        let true_ = fx.bcx.ins().iconst(types::I32, 1);
        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
        && matches!(
            template[1],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
        && matches!(
            template[6],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
    {
        assert_eq!(operands.len(), 4);
        let (leaf, eax_place) = match operands[1] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let ebx_place = match operands[0] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                        X86InlineAsmRegClass::reg
                    ))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
        let (sub_leaf, ecx_place) = match operands[2] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let edx_place = match operands[3] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };

        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
        return;
    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
    }

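    // Generic path: allocate registers and stack slots for every operand, emit
    // a wrapper function as global assembly, and call it with a pointer to a
    // scratch slot that carries the inputs in and the outputs back out.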
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
}

struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
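    /// Assign a physical register to every operand. Explicit register operands
    /// are reserved first; register classes are then satisfied from the
    /// remaining allocatable registers. The `(bool, bool)` flags track whether
    /// a register is used as an input and as an output respectively, so an
    /// `in` and a `lateout` may share a register.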
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            |feature| sess.target_features.contains(&Symbol::intern(feature)),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

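    /// Lay out the stack scratch area: a save slot for each allocated register
    /// that the "C" ABI does not already treat as call-clobbered, a shared
    /// slot per `inout` operand, and slots for the remaining inputs and
    /// outputs. Plain input and output slots may overlap, as the inputs are
    /// dead once the asm body has run.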
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(
            self.arch,
            |feature| self.tcx.sess.target_features.contains(&Symbol::intern(feature)),
            &self.tcx.sess.target,
            Symbol::intern("C"),
        )
        .unwrap()
        .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by a function call,
            // then it doesn't need to be saved.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset the slot size to its value before the input slots were allocated, so
        // that input and output operands can overlap and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

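    /// Render the wrapper function as global assembly around the user's
    /// template. A minimal sketch of the output for an empty x86_64 template
    /// (the name is illustrative; real names follow `__inline_asm_{cgu}_n{i}`):
    ///
    /// ```text
    /// .globl __inline_asm_example_n0
    /// .type __inline_asm_example_n0,@function
    /// .section .text.__inline_asm_example_n0,"ax",@progbits
    /// __inline_asm_example_n0:
    /// .intel_syntax noprefix
    ///     push rbp
    ///     mov rbp,rdi
    ///     # save clobbers, load inputs, template body, store outputs, restore clobbers
    ///     pop rbp
    ///     ret
    /// .att_syntax
    /// .size __inline_asm_example_n0, .-__inline_asm_example_n0
    /// .text
    /// ```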
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

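    /// Wrapper prologue: save what the wrapper itself clobbers and move the
    /// scratch-slot pointer, the wrapper's only argument, into a fixed base
    /// register (`ebp`/`rbp` on x86, `s0` on RISC-V).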
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

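    /// Wrapper epilogue: undo the prologue and return to the caller.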
    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

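    /// Epilogue for `noreturn` asm blocks: nothing needs restoring, but emit a
    /// trap instruction in case the asm body falls through anyway.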
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

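    /// Store `reg` into the scratch area at `offset`, addressed through the
    /// base register set up by the prologue.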
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

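    /// Load `reg` from the scratch area at `offset`. Also used to load the
    /// input operands into their registers before the template body runs.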
    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

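/// Call the generated asm wrapper from Cranelift IR: spill the inputs into a
/// stack slot, pass the slot's address as the single argument, and read the
/// outputs back out of the slot after the call.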
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}