]> git.lizzy.rs Git - rust.git/blob - src/inline_asm.rs
Fold `vtable_trait_upcasting_coercion_new_vptr_slot` logic into obligation processing.
[rust.git] / src / inline_asm.rs
1 //! Codegen of [`asm!`] invocations.
2
3 use crate::prelude::*;
4
5 use std::fmt::Write;
6
7 use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
8 use rustc_middle::mir::InlineAsmOperand;
9 use rustc_target::asm::*;
10
/// Codegen of a single `asm!` invocation.
///
/// Except for a few special-cased templates, the asm text is emitted as a
/// standalone global-asm function (see `generate_asm_wrapper`) and the current
/// function calls it with a pointer to a stack scratch slot holding the input
/// and output values (see `call_inline_asm`).
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

    if template.is_empty() {
        // Black box
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
        // Special case: `int 0x29` (NOTE(review): looks like Windows'
        // `__fastfail` sequence — confirm against the emitting caller).
        // Lowered to an unconditional trap instead of real inline asm.
        let true_ = fx.bcx.ins().iconst(types::I32, 1);
        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
        && matches!(
            template[1],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
        && matches!(
            template[6],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
    {
        // Special case: the "movq %rbx, {0:r}\ncpuid\nxchgq %rbx, {0:r}"
        // template (rbx moved out of the way around `cpuid`). Instead of
        // emitting real asm, route it through the cpuid emulation in
        // `crate::intrinsics::codegen_cpuid_call`.
        assert_eq!(operands.len(), 4);
        // operands[1]: inout eax — the input scalar is the cpuid leaf.
        let (leaf, eax_place) = match operands[1] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                let reg = expect_reg(reg);
                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::ax));
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        // operands[0]: out register of class `reg` — the placeholder register
        // the template swaps rbx through, so this receives the ebx result.
        let ebx_place = match operands[0] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                        X86InlineAsmRegClass::reg
                    ))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
        // operands[2]: inout ecx — the input scalar is the cpuid sub-leaf.
        let (sub_leaf, ecx_place) = match operands[2] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                let reg = expect_reg(reg);
                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::cx));
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        // operands[3]: out edx.
        let edx_place = match operands[3] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                let reg = expect_reg(reg);
                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::dx));
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };

        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
        return;
    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
        // NOTE(review): no `return` here — control falls through to the
        // generic lowering below after emitting the trap. Presumably the trap
        // makes the rest unreachable at runtime; worth confirming.
    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
    }

    // Generic lowering: give every operand a slot in a shared stack scratch
    // area, then emit a wrapper asm function that moves values between the
    // slots and the fixed registers around the user's template.
    let mut slot_size = Size::from_bytes(0);
    let mut clobbered_regs = Vec::new();
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    // Bump-allocate the next slot in the scratch area, aligned and sized for
    // the largest type the register class supports on x86_64.
    let mut new_slot = |reg_class: InlineAsmRegClass| {
        let reg_size = reg_class
            .supported_types(InlineAsmArch::X86_64)
            .iter()
            .map(|(ty, _)| ty.size())
            .max()
            .unwrap();
        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
        slot_size = slot_size.align_to(align);
        let offset = slot_size;
        slot_size += reg_size;
        offset
    };

    // FIXME overlap input and output slots to save stack space
    for operand in operands {
        match *operand {
            InlineAsmOperand::In { reg, ref value } => {
                let reg = expect_reg(reg);
                // Two separate slots per operand: one for saving/restoring the
                // clobbered register, one for the value itself.
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                inputs.push((
                    reg,
                    new_slot(reg.reg_class()),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg, late: _, place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                if let Some(place) = place {
                    outputs.push((
                        reg,
                        new_slot(reg.reg_class()),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                inputs.push((
                    reg,
                    new_slot(reg.reg_class()),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        reg,
                        new_slot(reg.reg_class()),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    // Give the wrapper a name unique within this codegen unit.
    let inline_asm_index = fx.inline_asm_index;
    fx.inline_asm_index += 1;
    let asm_name = format!("{}__inline_asm_{}", fx.symbol_name, inline_asm_index);

    let generated_asm = generate_asm_wrapper(
        &asm_name,
        InlineAsmArch::X86_64,
        options,
        template,
        clobbered_regs,
        &inputs,
        &outputs,
    );
    fx.cx.global_asm.push_str(&generated_asm);

    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
}
180
181 fn generate_asm_wrapper(
182     asm_name: &str,
183     arch: InlineAsmArch,
184     options: InlineAsmOptions,
185     template: &[InlineAsmTemplatePiece],
186     clobbered_regs: Vec<(InlineAsmReg, Size)>,
187     inputs: &[(InlineAsmReg, Size, Value)],
188     outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
189 ) -> String {
190     let mut generated_asm = String::new();
191     writeln!(generated_asm, ".globl {}", asm_name).unwrap();
192     writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
193     writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
194     writeln!(generated_asm, "{}:", asm_name).unwrap();
195
196     generated_asm.push_str(".intel_syntax noprefix\n");
197     generated_asm.push_str("    push rbp\n");
198     generated_asm.push_str("    mov rbp,rdi\n");
199
200     // Save clobbered registers
201     if !options.contains(InlineAsmOptions::NORETURN) {
202         // FIXME skip registers saved by the calling convention
203         for &(reg, offset) in &clobbered_regs {
204             save_register(&mut generated_asm, arch, reg, offset);
205         }
206     }
207
208     // Write input registers
209     for &(reg, offset, _value) in inputs {
210         restore_register(&mut generated_asm, arch, reg, offset);
211     }
212
213     if options.contains(InlineAsmOptions::ATT_SYNTAX) {
214         generated_asm.push_str(".att_syntax\n");
215     }
216
217     // The actual inline asm
218     for piece in template {
219         match piece {
220             InlineAsmTemplatePiece::String(s) => {
221                 generated_asm.push_str(s);
222             }
223             InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
224         }
225     }
226     generated_asm.push('\n');
227
228     if options.contains(InlineAsmOptions::ATT_SYNTAX) {
229         generated_asm.push_str(".intel_syntax noprefix\n");
230     }
231
232     if !options.contains(InlineAsmOptions::NORETURN) {
233         // Read output registers
234         for &(reg, offset, _place) in outputs {
235             save_register(&mut generated_asm, arch, reg, offset);
236         }
237
238         // Restore clobbered registers
239         for &(reg, offset) in clobbered_regs.iter().rev() {
240             restore_register(&mut generated_asm, arch, reg, offset);
241         }
242
243         generated_asm.push_str("    pop rbp\n");
244         generated_asm.push_str("    ret\n");
245     } else {
246         generated_asm.push_str("    ud2\n");
247     }
248
249     generated_asm.push_str(".att_syntax\n");
250     writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
251     generated_asm.push_str(".text\n");
252     generated_asm.push_str("\n\n");
253
254     generated_asm
255 }
256
257 fn call_inline_asm<'tcx>(
258     fx: &mut FunctionCx<'_, '_, 'tcx>,
259     asm_name: &str,
260     slot_size: Size,
261     inputs: Vec<(InlineAsmReg, Size, Value)>,
262     outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
263 ) {
264     let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
265         kind: StackSlotKind::ExplicitSlot,
266         offset: None,
267         size: u32::try_from(slot_size.bytes()).unwrap(),
268     });
269     if fx.clif_comments.enabled() {
270         fx.add_comment(stack_slot, "inline asm scratch slot");
271     }
272
273     let inline_asm_func = fx
274         .module
275         .declare_function(
276             asm_name,
277             Linkage::Import,
278             &Signature {
279                 call_conv: CallConv::SystemV,
280                 params: vec![AbiParam::new(fx.pointer_type)],
281                 returns: vec![],
282             },
283         )
284         .unwrap();
285     let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
286     if fx.clif_comments.enabled() {
287         fx.add_comment(inline_asm_func, asm_name);
288     }
289
290     for (_reg, offset, value) in inputs {
291         fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
292     }
293
294     let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
295     fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
296
297     for (_reg, offset, place) in outputs {
298         let ty = fx.clif_type(place.layout().ty).unwrap();
299         let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
300         place.write_cvalue(fx, CValue::by_val(value, place.layout()));
301     }
302 }
303
304 fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
305     match reg_or_class {
306         InlineAsmRegOrRegClass::Reg(reg) => reg,
307         InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
308     }
309 }
310
311 fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
312     match arch {
313         InlineAsmArch::X86_64 => {
314             write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
315             reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
316             generated_asm.push('\n');
317         }
318         _ => unimplemented!("save_register for {:?}", arch),
319     }
320 }
321
322 fn restore_register(
323     generated_asm: &mut String,
324     arch: InlineAsmArch,
325     reg: InlineAsmReg,
326     offset: Size,
327 ) {
328     match arch {
329         InlineAsmArch::X86_64 => {
330             generated_asm.push_str("    mov ");
331             reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
332             writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
333         }
334         _ => unimplemented!("restore_register for {:?}", arch),
335     }
336 }