//! Codegen of `asm!` invocations.
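//!
//! Cranelift has no support for inline assembly, so an `asm!` block is lowered to an out of
//! line assembly wrapper function: the wrapper is appended as text to the global asm buffer
//! (see `generate_asm_wrapper`) and the Cranelift IR simply calls it with a pointer to a
//! single scratch stack slot holding all inputs and outputs (see `call_inline_asm`).
//!
//! A rough sketch of what an invocation compiles down to (names are illustrative, not actual
//! output):
//!
//! ```ignore
//! let y: u64;
//! unsafe { asm!("add {}, 1", inout(reg) x => y) };
//! // becomes roughly:
//! //   store x into the scratch slot
//! //   call __inline_asm_<cgu>_n<i>(&scratch_slot)
//! //   load y from the scratch slot
//! ```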
use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

use crate::prelude::*;
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    destination: Option<mir::BasicBlock>,
) {
    // FIXME add .eh_frame unwind info directives

    if !template.is_empty() {
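        // Recognize a few fixed templates emitted by the sysroot and lower them directly
        // instead of going through the generic wrapper path below. `int $$0x29` is Windows'
        // __fastfail; a plain trap is close enough for our purposes.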
        if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
            fx.bcx.ins().trap(TrapCode::User(1));
            return;
        } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
            && matches!(
                template[1],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
            )
            && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
            && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
            && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
            && matches!(
                template[6],
                InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
            )
        {
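            // This matches the template core::arch uses for `__cpuid_count`: rbx can't be
            // handed to the asm block, so the template shuffles it in and out around `cpuid`.
            // Instead of assembling that, call the cpuid helper in crate::intrinsics.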
            assert_eq!(operands.len(), 4);
            let (leaf, eax_place) = match operands[1] {
                InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                    assert_eq!(reg, InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)));
                    (
                        crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                        crate::base::codegen_place(fx, out_place.unwrap()),
                    )
                }
                _ => unreachable!(),
            };
            let ebx_place = match operands[0] {
                InlineAsmOperand::Out { reg, late: true, place } => {
                    assert_eq!(
                        reg,
                        InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::reg
                        ))
                    );
                    crate::base::codegen_place(fx, place.unwrap())
                }
                _ => unreachable!(),
            };
            let (sub_leaf, ecx_place) = match operands[2] {
                InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                    assert_eq!(reg, InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)));
                    (
                        crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                        crate::base::codegen_place(fx, out_place.unwrap()),
                    )
                }
                _ => unreachable!(),
            };
            let edx_place = match operands[3] {
                InlineAsmOperand::Out { reg, late: true, place } => {
                    assert_eq!(reg, InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)));
                    crate::base::codegen_place(fx, place.unwrap())
                }
                _ => unreachable!(),
            };

            let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

            eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
            ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
            ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
            edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
            let destination_block = fx.get_block(destination.unwrap());
            fx.bcx.ins().jump(destination_block, &[]);
            return;
        } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
            // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
            crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
            return;
        } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
            crate::trap::trap_unimplemented(fx, "Alloca is not supported");
            return;
        }
    }

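    // Generic path: assign registers and stack slot offsets, emit a wrapper function as
    // global asm, and call it with a pointer to the scratch slot.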
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

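    // Pick a wrapper symbol name that is unique within this codegen unit.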
    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

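    // Collect the scalar values to store into the scratch slot before the call, and the
    // places to write back from it after the call.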
    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);

    match destination {
        Some(destination) => {
            let destination_block = fx.get_block(destination);
            fx.bcx.ins().jump(destination_block, &[]);
        }
        None => {
            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
        }
    }
}
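
/// Per-invocation state: assigns a concrete register to every operand and an offset into a
/// single scratch stack slot that is shared between the calling function and the generated
/// wrapper.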
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
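        // For each register, the pair tracks whether it is already used as an input and as an
        // output respectively.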
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class), late: false, ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });
                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }
                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });
                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }
                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class), late: true, ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });
                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }
                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }
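
    /// Lay out the scratch slot: space for saved clobbered registers first, then inout
    /// operands, then inputs and outputs, where the input and output regions may overlap
    /// because inputs are consumed before outputs are written.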
    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by function call, then
            // we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });
                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset slot size to before input so that input and output operands can overlap
        // and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }
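
    /// Render the wrapper as assembly text: prologue, save clobbered registers, load inputs
    /// from the scratch slot, the user's asm template, store outputs back, restore clobbers,
    /// epilogue.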
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }
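
    // The wrapper receives a single argument: a pointer to the scratch slot. The prologue
    // moves it into a frame register (ebp/rbp/s0) which save_register/restore_register then
    // address relative to.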
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }
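
    // Spill/reload helpers for the scratch slot: save_register stores `reg` at `offset`,
    // restore_register loads it back.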
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}
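
/// Emit the call to the generated wrapper: declare it as an imported function taking a single
/// pointer (SystemV call conv), store all inputs into the scratch slot, call, then load the
/// outputs back out of the slot.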
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}