//! Codegen of `asm!` invocations.

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

use crate::prelude::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    destination: Option<mir::BasicBlock>,
) {
    // FIXME add .eh_frame unwind info directives
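
    // Cranelift has no native support for inline assembly: a handful of asm
    // templates known to appear in the sysroot are pattern-matched below and
    // lowered directly to clif instructions; everything else is compiled into
    // a separate wrapper function in the global asm and called like a normal
    // function.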

    if !template.is_empty() {
        // Used by panic_abort
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
            fx.bcx.ins().trap(TrapCode::User(1));
            return;
        }
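
        // Used by stdarch: `__cpuid_count` wraps `cpuid` in
        // `mov {tmp}, rbx; cpuid; xchg {tmp}, rbx` because rbx is reserved by
        // LLVM and can't be named as a direct operand.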
        if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
            && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
            && matches!(
                template[7],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
        {
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: _,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out {
                    reg:
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg,
                        )),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    late: _,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
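
            // codegen_cpuid_call emulates cpuid in clif IR instead of
            // executing the real instruction, returning dummy values for
            // leaves it doesn't know about.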
            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }

        // Used by compiler-builtins
        if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
            return;
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
            return;
        }
        if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
            && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[8],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
            // Return zero dummy values for all performance counters
            match operands[0] {
                InlineAsmOperand::In {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    value: _,
                } => {}
                _ => unreachable!(),
            };
            let lo = match operands[1] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let hi = match operands[2] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: _,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let u32_layout = fx.layout_of(fx.tcx.types.u32);
            let zero = fx.bcx.ins().iconst(types::I32, 0);
            lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
            hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));

            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
            )
            && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
            )
            && template[4] == InlineAsmTemplatePiece::String(")".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }
    }
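
    // None of the special cases matched: assemble the template into a
    // standalone wrapper function and call it through a scratch stack slot.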
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);
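
    // Values travel between clif IR and the wrapper through the scratch
    // slot; every operand uses the fixed offset assigned to it by
    // allocate_stack_slots.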
    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);

    match destination {
        Some(destination) => {
            let destination_block = fx.get_block(destination);
            fx.bcx.ins().jump(destination_block, &[]);
        }
        None => {
            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
        }
    }
}
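
// Illustrative sketch (not actual compiler output; registers and offsets are
// made up): for `asm!("add {0}, {1}", inout(reg) x, in(reg) y)` on x86_64 the
// generated wrapper looks roughly like:
//
//     __inline_asm_example_n0:
//         push rbp
//         mov rbp,rdi           // rdi = pointer to the scratch stack slot
//         mov rax, [rbp+0x0]    // load input/output operand 0
//         mov rcx, [rbp+0x8]    // load input operand 1
//         add rax, rcx          // the template with registers substituted
//         mov [rbp+0x0], rax    // store output operand 0
//         pop rbp
//         ret
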
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];
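
        // For each physical register, `allocated` tracks two flags:
        // .0 = used as an input (read early), .1 = used as an output
        // (written late). The same register may serve both an input and a
        // lateout, since every input is read before any late output is
        // written, so the two flags are checked independently below.
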
        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }
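
    /// Assign each operand (and each clobbered callee-saved register) an
    /// offset into a single scratch stack slot. Input and output slots may
    /// overlap because every input is read before any output is written
    /// back.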
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by function call, then
            // we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset slot size to before input so that input and output operands can overlap
        // and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }
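
        // Note the direction of the helpers below: restore_register emits a
        // load from the stack slot into the register, so it is what writes
        // the input registers; save_register emits the store in the other
        // direction.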

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value =
            fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}