//! Codegen of [`asm!`] invocations.

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::Symbol;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

    if template.is_empty() {
        // Black box: an empty asm template has no effect beyond acting as an optimization barrier.
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
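        // `int 0x29` is the Windows fast-fail interrupt; lower it to an unconditional trap.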
        let true_ = fx.bcx.ins().iconst(types::I32, 1);
        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
        && matches!(
            template[1],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
        && matches!(
            template[6],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
    {
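        // This appears to be the template produced by `__cpuid_count` in `core::arch`:
        // rbx cannot be used as an `asm!` operand, so the template parks it in a scratch
        // register around `cpuid`. Instead of assembling it, call the cpuid shim directly.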
        assert_eq!(operands.len(), 4);
        let (leaf, eax_place) = match operands[1] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let ebx_place = match operands[0] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                        X86InlineAsmRegClass::reg
                    ))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
        let (sub_leaf, ecx_place) = match operands[2] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let edx_place = match operands[3] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };

        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
        return;
    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
    }

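    // General case: lower the asm block to a separate wrapper function emitted as global
    // asm. Inputs and outputs are passed through a single scratch stack slot whose address
    // is the wrapper's only argument.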
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.inline_asm_index;
    fx.inline_asm_index += 1;
    let asm_name = format!("{}__inline_asm_{}", fx.symbol_name, inline_asm_index);

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

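    // Collect (stack slot offset, value/place) pairs; call_inline_asm stores the inputs
    // into the slot before the call and reads the outputs back out after it.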
    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
}

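/// Builds the assembly wrapper for a single `asm!` invocation: allocates a concrete
/// register for every operand, assigns stack slot offsets for clobber saves, inputs and
/// outputs, and renders the final assembly text.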
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            |feature| sess.target_features.contains(&Symbol::intern(feature)),
            &sess.target,
        );
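        // For each physical register, track whether it is live at the start of the asm
        // (used as an input) and at the end (used as an output). An `in` and a `lateout`
        // operand may share a register, since their live ranges do not overlap.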
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
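        // A register is usable only if none of the registers it overlaps with (e.g.
        // al/ax/eax/rax on x86) has already been taken for the relevant live range.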
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

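        // Bump allocator for the scratch area: each slot is sized and aligned for the
        // largest value type the register class can hold.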
        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
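        // The wrapper is invoked like an ordinary C function, so registers that the C
        // calling convention already treats as caller-saved need no extra save/restore.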
        let abi_clobber =
            InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, Symbol::intern("C"))
                .unwrap()
                .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by a function call, then
            // we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset the slot size to its value before the inputs were allocated, so that
        // input-only and output-only operands can overlap and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

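    /// Renders the standalone assembly function: a prologue that stashes the scratch-slot
    /// pointer, loads from the slot into the input registers, the user's template, stores
    /// from the output registers back into the slot, and an epilogue.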
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

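    /// Saves the frame/base pointer and moves the scratch-slot address (the wrapper's only
    /// argument) into the register that save_register/restore_register address through.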
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

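    /// For `noreturn` asm there is nothing to restore; emit a trap so that falling off the
    /// end of the user's template is caught instead of executing whatever comes next.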
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

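    /// Emits a store of `reg` at `offset` bytes into the scratch area, addressed through
    /// the base register set up by the prologue (ebp/rbp/s0).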
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

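    /// The inverse of [`Self::save_register`]: loads `reg` from `offset` bytes into the
    /// scratch area.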
    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

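/// Emits the Cranelift call to the generated asm wrapper: stores all inputs into a fresh
/// stack slot, passes the slot's address as the single argument, then loads the outputs
/// back out of the slot.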
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}