//! Codegen of [`asm!`] invocations.

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::Symbol;
use rustc_target::asm::*;

use crate::prelude::*;
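
// Cranelift has no native support for inline assembly. Instead, each `asm!`
// invocation is lowered to a small standalone assembly function: its text is
// appended to the global asm buffer (assembled separately), and the Cranelift
// code calls it with a pointer to a stack scratch area through which register
// values are exchanged. See `generate_asm_wrapper` and `call_inline_asm` below.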
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives
    if template.is_empty() {
        // Black box
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
        // Windows fast-fail (`int 0x29`): lower to a trap instead of real asm.
        let true_ = fx.bcx.ins().iconst(types::I32, 1);
        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
        && matches!(
            template[1],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
        && matches!(
            template[6],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
    {
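        // This matches the asm template used by `__cpuid_count` in
        // `core::arch::x86_64` (rbx is reserved by LLVM, so the intrinsic moves
        // it aside around the `cpuid` instruction). Rather than assembling the
        // template, lower the whole block to the cpuid handling below.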
        assert_eq!(operands.len(), 4);
        let (leaf, eax_place) = match operands[1] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let ebx_place = match operands[0] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                        X86InlineAsmRegClass::reg
                    ))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
        let (sub_leaf, ecx_place) = match operands[2] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let edx_place = match operands[3] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
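
        // Don't assemble anything; `codegen_cpuid_call` emulates the `cpuid`
        // instruction in generated code rather than executing the real one.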
        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
        return;
    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
    }
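
    // Generic path: assign a register and scratch-area offsets to every operand,
    // emit the asm into a standalone wrapper function in the global asm buffer,
    // and call that wrapper with a pointer to the scratch area.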
    let mut clobbered_regs = Vec::new();
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: InlineAsmArch::X86_64,
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    // FIXME overlap input and output slots to save stack space
    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                let reg = asm_gen.registers[i].unwrap();
                clobbered_regs.push((reg, asm_gen.stack_slots_clobber[i].unwrap()));
                inputs.push((
                    reg,
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                let reg = asm_gen.registers[i].unwrap();
                clobbered_regs.push((reg, asm_gen.stack_slots_clobber[i].unwrap()));
                if let Some(place) = place {
                    outputs.push((
                        reg,
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                let reg = asm_gen.registers[i].unwrap();
                clobbered_regs.push((reg, asm_gen.stack_slots_clobber[i].unwrap()));
                inputs.push((
                    reg,
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        reg,
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    let inline_asm_index = fx.inline_asm_index;
    fx.inline_asm_index += 1;
    let asm_name = format!("{}__inline_asm_{}", fx.symbol_name, inline_asm_index);

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name, clobbered_regs, &inputs, &outputs);
    fx.cx.global_asm.push_str(&generated_asm);

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
}
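
/// Per-invocation lowering state.
///
/// `registers` records the register picked for each operand. The `stack_slots_*`
/// vectors record each operand's offsets into the scratch area handed to the
/// generated wrapper (a save slot for the clobbered register, plus input/output
/// slots); `stack_slot_size` is the total size of that area.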
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}
impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            |feature| sess.target_features.contains(&Symbol::intern(feature)),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

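        // Register allocation is done in three passes: explicitly named registers
        // are reserved first, then register classes for (in)out operands (most
        // constrained), and finally classes for in/lateout operands. For each
        // register, `allocated` tracks (used as input, used as output) separately
        // so that an `in` and a `lateout` may share a register.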
        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

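        // An `in` is consumed before the asm runs and a `lateout` is written only
        // after it has run, so the two may share a register; the passes below
        // check and claim only their own half of the `allocated` entry.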
        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }
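
    /// Assign every register operand an offset into the wrapper's scratch area:
    /// a slot to save the clobbered register, plus an input and/or output slot.
    /// Slots are sized for the largest type the register class supports; `inout`
    /// operands reuse a single slot for both input and output.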
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let mut new_slot = |reg_class: InlineAsmRegClass| {
            // Slots are sized and aligned for the largest type the register
            // class can hold.
            let reg_size = reg_class
                .supported_types(InlineAsmArch::X86_64)
                .iter()
                .map(|(ty, _)| ty.size())
                .max()
                .unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            slot_size = slot_size.align_to(align);
            let offset = slot_size;
            slot_size += reg_size;
            offset
        };

        // FIXME overlap input and output slots to save stack space
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. } => {
                    slots_clobber[i] = Some(new_slot(reg.reg_class()));
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                InlineAsmOperand::Out { reg, place, .. } => {
                    slots_clobber[i] = Some(new_slot(reg.reg_class()));
                    if place.is_some() {
                        slots_output[i] = Some(new_slot(reg.reg_class()));
                    }
                }
                InlineAsmOperand::InOut { reg, out_place, .. } => {
                    slots_clobber[i] = Some(new_slot(reg.reg_class()));
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    if out_place.is_some() {
                        slots_output[i] = Some(slot);
                    }
                }
                InlineAsmOperand::Const { value: _ } => (),
                InlineAsmOperand::SymFn { value: _ } => (),
                InlineAsmOperand::SymStatic { def_id: _ } => (),
            }
        }

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }
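
    /// Generate the standalone assembly function for this `asm!` block.
    ///
    /// For a single `in(reg)` operand the output looks roughly like the
    /// following (illustrative only; the real register, offsets, and directives
    /// depend on the allocation above):
    ///
    /// ```asm
    /// .globl example__inline_asm_0
    /// example__inline_asm_0:
    ///     push rbp
    ///     mov rbp,rdi          # rdi = pointer to the scratch area
    ///     mov [rbp+0x0], rax   # save clobbered register
    ///     mov rax, [rbp+0x8]   # load input value
    ///     # ... user-provided asm ...
    ///     mov rax, [rbp+0x0]   # restore clobbered register
    ///     pop rbp
    ///     ret
    /// ```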
    fn generate_asm_wrapper(
        &self,
        asm_name: &str,
        clobbered_regs: Vec<(InlineAsmReg, Size)>,
        inputs: &[(InlineAsmReg, Size, Value)],
        outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
    ) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        generated_asm.push_str(".intel_syntax noprefix\n");
        generated_asm.push_str("    push rbp\n");
        // The scratch area pointer is passed in rdi (first SystemV argument).
        generated_asm.push_str("    mov rbp,rdi\n");

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // FIXME skip registers saved by the calling convention
            for &(reg, offset) in &clobbered_regs {
                save_register(&mut generated_asm, self.arch, reg, offset);
            }
        }

        // Write input registers
        for &(reg, offset, _value) in inputs {
            restore_register(&mut generated_asm, self.arch, reg, offset);
        }

        if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for &(reg, offset, _place) in outputs {
                save_register(&mut generated_asm, self.arch, reg, offset);
            }

            // Restore clobbered registers
            for &(reg, offset) in clobbered_regs.iter().rev() {
                restore_register(&mut generated_asm, self.arch, reg, offset);
            }

            generated_asm.push_str("    pop rbp\n");
            generated_asm.push_str("    ret\n");
        } else {
            generated_asm.push_str("    ud2\n");
        }

439 writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
440 generated_asm.push_str(".text\n");
441 generated_asm.push_str("\n\n");
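
/// Emit the Cranelift-side call to a generated asm wrapper: store the inputs
/// into a fresh stack slot at their allocated offsets, call the wrapper
/// (SystemV call conv, one pointer argument) with the slot's address, then
/// load the outputs back from the slot.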
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(InlineAsmReg, Size, Value)>,
    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    // Store the inputs into the scratch area at their allocated offsets.
    for (_reg, offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    // Load the outputs back out of the scratch area.
    for (_reg, offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}
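
/// Emit a `mov` storing `reg` into its slot in the scratch area (inside the
/// wrapper, rbp holds the scratch area's address).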
fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
    match arch {
        InlineAsmArch::X86_64 => {
            write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
            generated_asm.push('\n');
        }
        _ => unimplemented!("save_register for {:?}", arch),
    }
}
/// Emit a `mov` loading `reg` back from its slot in the scratch area.
fn restore_register(
    generated_asm: &mut String,
    arch: InlineAsmArch,
    reg: InlineAsmReg,
    offset: Size,
) {
    match arch {
        InlineAsmArch::X86_64 => {
            generated_asm.push_str("    mov ");
            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
        }
        _ => unimplemented!("restore_register for {:?}", arch),
    }
}