//! Codegen of `asm!` invocations.
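//!
//! Cranelift has no native support for inline assembly, so `asm!` blocks are lowered to
//! separate wrapper functions: the template is rendered as textual assembly into the global
//! asm section, and the wrapper is called like an ordinary function, exchanging operand
//! values through a stack-allocated scratch slot. As a hypothetical illustration, an
//! invocation such as
//!
//! ```ignore
//! let x: u64;
//! unsafe { std::arch::asm!("mov {}, 42", out(reg) x) };
//! ```
//!
//! is compiled to a call to a generated symbol `__inline_asm_<cgu_name>_n<index>` whose body
//! saves any callee-saved registers it clobbers, loads the inputs from the scratch slot,
//! runs the template, and stores the outputs back into the slot.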
use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::def_id::DefId;
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

use crate::prelude::*;
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    destination: Option<mir::BasicBlock>,
) {
    // FIXME add .eh_frame unwind info directives

    if !template.is_empty() {
        // Used by panic_abort
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
            fx.bcx.ins().trap(TrapCode::User(1));
            return;
        }

        // Used by stdarch
        if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
            && matches!(
                template[6],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
        {
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: true,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out {
                    reg:
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg,
                        )),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    late: true,
                    ref in_value,
                    out_place: Some(out_place),
                } => (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place),
                ),
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }

        // Used by compiler-builtins
        if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
            return;
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
            return;
        }

        // Used by measureme
        if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
            && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
            && matches!(
                template[8],
                InlineAsmTemplatePiece::Placeholder {
                    operand_idx: 0,
                    modifier: Some('r'),
                    span: _
                }
            )
            && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
            // Return zero dummy values for all performance counters
            match operands[0] {
                InlineAsmOperand::In {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
                    value: _,
                } => {}
                _ => unreachable!(),
            };
            let lo = match operands[1] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };
            let hi = match operands[2] {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
                    late: true,
                    place: Some(place),
                } => crate::base::codegen_place(fx, place),
                _ => unreachable!(),
            };

            let u32_layout = fx.layout_of(fx.tcx.types.u32);
            let zero = fx.bcx.ins().iconst(types::I32, 0);
            lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
            hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));

            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
            )
            && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
            && matches!(
                template[3],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
            )
            && template[4] == InlineAsmTemplatePiece::String(")".to_string())
        {
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        }
    }
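
    // None of the special-cased templates matched: assign each operand a register and a
    // scratch-slot offset, emit a standalone wrapper function containing the template into
    // the global asm, and call it like an ordinary function.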
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);

    match destination {
        Some(destination) => {
            let destination_block = fx.get_block(destination);
            fx.bcx.ins().jump(destination_block, &[]);
        }
        None => {
            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
        }
    }
}
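
/// Helper that drives the generic lowering: it assigns a concrete register and, where
/// needed, a scratch-slot offset to every operand, then renders the wrapper function as
/// textual assembly for the global asm section.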
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }
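
    // Worked example (hypothetical operand list, for illustration): on x86_64, one
    // `inout(reg)` operand and one `in(xmm_reg)` operand would be laid out as follows: the
    // inout gets an 8-byte slot at offset 0 shared by its input and output, and the xmm input
    // gets a 16-byte slot aligned up to offset 16, so `stack_slot_size` ends up as 32 bytes.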
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by function call, then
            // we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset slot size to before input so that input and output operands can overlap
        // and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }
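
    // For a single `inout(reg)` operand allocated to rax on x86_64, the generated wrapper is
    // expected to look roughly like this (illustrative sketch, not verbatim output):
    //
    //     __inline_asm_example_n0:
    //     .intel_syntax noprefix
    //         push rbp
    //         mov rbp,rdi
    //         mov rax, [rbp+0x0]   // load input from the scratch slot
    //         <asm template>
    //         mov [rbp+0x0], rax   // store output back to the scratch slot
    //         pop rbp
    //         ret
    //     .att_syntax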
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }
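
    // The wrapper receives the address of the scratch slot as its only argument and keeps it
    // in the frame-pointer register while the template runs: rdi -> rbp on x86_64 (SystemV),
    // [esp+8] -> ebp on x86, and a0 -> s0 on RISC-V. save_register/restore_register below
    // address the slot relative to that register.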
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }
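
    // `noreturn` asm must not fall through, so instead of restoring registers and returning,
    // the wrapper ends in a trap instruction; reaching it means the supposedly diverging asm
    // returned after all.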
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}
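
/// Emit CLIF that spills the inputs into a fresh scratch stack slot, calls the generated
/// wrapper with the address of that slot, and reloads the outputs from it afterwards.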
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}