use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug, ty::Instance};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
use tracing::debug;
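
// Overview (informal): `codegen_inline_asm` lowers an `asm!` block by turning
// each operand into an LLVM constraint code, rewriting the template into
// LLVM's `$N` placeholder syntax, and emitting the whole thing through
// `inline_asm_call`. `codegen_global_asm` does the same, more simply, for
// `global_asm!` items.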
impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        instance: Instance<'_>,
        dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch) {
                            if let Some(feature) = feature {
                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
                                if self.tcx.sess.target_features.contains(&feature)
                                    || codegen_fn_attrs.target_features.contains(&feature)
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };

                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole set if one register is marked as
                        // clobbered. This is due to the way LLVM handles the
                        // FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class is
                        // disabled. This is necessary since otherwise LLVM will
                        // try to actually allocate a register for the dummy output.
                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }
        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llvm_value =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llvm_value);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    // A tied input refers to its output operand by index, which
                    // tells LLVM to allocate the same register for both.
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }

        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }

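        // Rough sketch of what the lowering has produced at this point (the
        // exact constraint codes vary by target and register class, so treat
        // this as illustrative only):
        //   asm!("mov {0}, {1}", out(reg) a, in(reg) b)   // on x86_64
        //     template_str: "mov $0, $1"
        //     constraints:  ["=&r", "r"]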
        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Avr => {
                    constraints.push("~{sreg}".to_string());
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                InlineAsmArch::Bpf => {}
                InlineAsmArch::Msp430 => {
                    constraints.push("~{sr}".to_string());
                }
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                llvm::AsmDialect::Intel
            }
            _ => llvm::AsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
            options.contains(InlineAsmOptions::MAY_UNWIND),
            dest_catch_funclet,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));

        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
            } else if options.contains(InlineAsmOptions::READONLY) {
                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
            }
            llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
        } else if options.contains(InlineAsmOptions::NOMEM) {
            llvm::Attribute::InaccessibleMemOnly
                .apply_callsite(llvm::AttributePlace::Function, result);
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }

        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}

impl AsmMethods for CodegenCx<'_, '_> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);

        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }

        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(
                self.llmod,
                template_str.as_ptr().cast(),
                template_str.len(),
            );
        }
    }
}

pub(crate) fn inline_asm_call<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: llvm::AsmDialect,
    line_spans: &[Span],
    unwind: bool,
    dest_catch_funclet: Option<(
        &'ll llvm::BasicBlock,
        &'ll llvm::BasicBlock,
        Option<&Funclet<'ll>>,
    )>,
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };
    let can_throw = if unwind { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys, output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            if unwind && llvm_util::get_version() < (13, 0, 0) {
                bx.cx.sess().span_fatal(
                    line_spans[0],
                    "unwinding from inline assembly is only supported on llvm >= 13.",
                );
            }

            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile, alignstack, dia, can_throw,
            );

            let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
                bx.invoke(fty, v, inputs, dest, catch, funclet)
            } else {
                bx.call(fty, v, inputs, None)
            };

            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx, key.as_ptr() as *const c_char, key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
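            // As an informal example: a template that spans three source lines
            // ends up with three i32 entries here, each holding the `span.lo()`
            // byte offset of the start of one line.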
            let mut srcloc = vec![];
            if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}

/// If the register is an xmm/ymm/zmm register then return its index.
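/// For example, `xmm3`, `ymm3` and `zmm3` all map to index 3; the index is
/// taken relative to the first register of the respective bank.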
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}

/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}

/// Converts a register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::X86(
                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
            ) => unreachable!("clobber-only"),
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}

/// Converts a modifier into LLVM's equivalent modifier.
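/// For example, the Rust modifier `e` on an x86 `reg` operand selects the
/// 32-bit register name, which corresponds to LLVM's `k` modifier (see the
/// x86 arm below).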
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::PowerPC(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::Bpf(_) => None,
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
            Some('h') => Some('B'),
            Some('l') => Some('A'),
            _ => None,
        },
        InlineAsmRegClass::Avr(_) => None,
        InlineAsmRegClass::S390x(_) => None,
        InlineAsmRegClass::Msp430(_) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
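/// For example, a discarded `out(reg)` operand on AArch64 is simply given the
/// type `i32` below.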
fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
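/// For example, a `Pointer` scalar is mapped to the pointer-sized integer type
/// (`cx.type_isize()`) rather than an LLVM pointer type.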
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
    match scalar.value {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}

/// Fix up an input value to work around LLVM bugs.
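/// For example, a scalar `i8` destined for an AArch64 `vreg` is first widened
/// into an `<8 x i8>` vector (see the first match arm below).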
fn llvm_fixup_input<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.value {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
            _ => value,
        },
        _ => value,
    }
}

/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.value {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
            _ => value,
        },
        _ => value,
    }
}

/// Output type to use for llvm_fixup_output.
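/// For example, for a scalar `i8` in an AArch64 `vreg` this returns
/// `<8 x i8>`, matching the widening performed by `llvm_fixup_input`.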
fn llvm_fixup_output_type<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
            Primitive::F32 => cx.type_i32(),
            Primitive::F64 => cx.type_i64(),
            _ => layout.llvm_type(cx),
        },
        _ => layout.llvm_type(cx),
    }
}