use crate::attributes;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug, ty::Instance};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
use smallvec::SmallVec;
use tracing::debug;

impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        instance: Instance<'_>,
        dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch) {
                            if let Some(feature) = feature {
                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
                                if self.tcx.sess.target_features.contains(&feature)
                                    || codegen_fn_attrs.target_features.contains(&feature)
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };

                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole set if one register is marked as
                        // clobbered. This is due to the way LLVM handles the
                        // FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class is
                        // disabled. This is necessary because otherwise LLVM will
                        // try to actually allocate a register for the dummy output.
                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }
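
        // For example, `asm!("...", out(reg) x, inout("eax") y)` on x86-64
        // would (roughly) have produced the constraints `["=&r", "=&{eax}"]`
        // at this point, with `op_idx` mapping each operand index to its
        // position in the constraint list.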

        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }
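
        // Note that a tied `InOut` input above reuses its output's constraint
        // *number* (e.g. `"0"`) rather than a register code; this is LLVM's
        // syntax for assigning an input and an output the same register.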

        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }
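
        // The result uses LLVM's `$` operand syntax: a Rust template such as
        // `"add {0}, {1}"` becomes (roughly) `"add ${0}, ${1}"`, a modifier
        // like `{0:e}` becomes `${0:k}` on x86, and literal `$` characters
        // are escaped as `$$`.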

        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Avr => {
                    constraints.push("~{sreg}".to_string());
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                InlineAsmArch::Bpf => {}
                InlineAsmArch::Msp430 => {
                    constraints.push("~{sr}".to_string());
                }
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                llvm::AsmDialect::Intel
            }
            _ => llvm::AsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
            options.contains(InlineAsmOptions::MAY_UNWIND),
            dest_catch_funclet,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
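
        // Translate the remaining `options` into LLVM call-site attributes:
        // `pure` asm may be CSE'd or removed entirely (ReadNone/ReadOnly plus
        // WillReturn), while plain `nomem` still has side effects but promises
        // not to touch program-visible memory (InaccessibleMemOnly).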
        let mut attrs = SmallVec::<[_; 2]>::new();
        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
            } else if options.contains(InlineAsmOptions::READONLY) {
                attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
            }
            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
        } else if options.contains(InlineAsmOptions::NOMEM) {
            attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });

        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
        if let Some((dest, _, _)) = dest_catch_funclet {
            self.switch_to_block(dest);
        }

        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}

impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef<'tcx>],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);

        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                        GlobalAsmOperandRef::SymFn { instance } => {
                            let llval = self.get_fn(instance);
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                        GlobalAsmOperandRef::SymStatic { def_id } => {
                            let llval = self
                                .renamed_statics
                                .borrow()
                                .get(&def_id)
                                .copied()
                                .unwrap_or_else(|| self.get_static(def_id));
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }

        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(
                self.llmod,
                template_str.as_ptr().cast(),
                template_str.len(),
            );
        }
    }
}

pub(crate) fn inline_asm_call<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: llvm::AsmDialect,
    line_spans: &[Span],
    unwind: bool,
    dest_catch_funclet: Option<(
        &'ll llvm::BasicBlock,
        &'ll llvm::BasicBlock,
        Option<&Funclet<'ll>>,
    )>,
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };
    let can_throw = if unwind { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys, output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                dia,
                can_throw,
            );

            let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
                bx.invoke(fty, v, inputs, dest, catch, funclet)
            } else {
                bx.call(fty, v, inputs, None)
            };

            // Store a mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, so bail out.
            None
        }
    }
}

/// If the register is an xmm/ymm/zmm register then return its index.
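/// (e.g. `xmm3`, `ymm3` and `zmm3` all map to index 3)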
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}

/// If the register is an AArch64 vector register then return its index.
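/// (`v0` through `v31` map to indices 0 through 31)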
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}

/// Converts a register class to an LLVM constraint code.
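/// For example, the generic `reg` class becomes `"r"`, an explicit register
/// such as `eax` becomes `"{eax}"`, and `xmm0` holding a 32-byte value is
/// renamed to `"{ymm0}"` so that the register name matches the operand size.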
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::X86(
                X86InlineAsmRegClass::x87_reg
                | X86InlineAsmRegClass::mmx_reg
                | X86InlineAsmRegClass::kreg0
                | X86InlineAsmRegClass::tmm_reg,
            ) => unreachable!("clobber-only"),
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}

/// Converts a modifier into LLVM's equivalent modifier.
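/// For example, on x86-64 the Rust modifier `e` (the 32-bit register name, as
/// in `{0:e}` printing `eax`) maps to LLVM's `k` modifier.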
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::PowerPC(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::Bpf(_) => None,
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
            Some('h') => Some('B'),
            Some('l') => Some('A'),
            _ => None,
        },
        InlineAsmRegClass::Avr(_) => None,
        InlineAsmRegClass::S390x(_) => None,
        InlineAsmRegClass::Msp430(_) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
    match scalar.primitive() {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}

/// Fix up an input value to work around LLVM bugs.
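/// For example, an `i8` destined for an AArch64 `vreg` is inserted into lane 0
/// of an `<8 x i8>` vector, since LLVM expects a vector type for that register
/// class.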
fn llvm_fixup_input<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.primitive() {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
                _ => value,
            }
        }
        _ => value,
    }
}

/// Fix up an output value to work around LLVM bugs.
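/// Each case here undoes the corresponding transformation in
/// `llvm_fixup_input` (e.g. extracting lane 0 back out of the vector that the
/// input fixup created).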
fn llvm_fixup_output<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.primitive() {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
                _ => value,
            }
        }
        _ => value,
    }
}

/// Output type to use for llvm_fixup_output.
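/// This must stay in sync with `llvm_fixup_output`: it is the LLVM type of the
/// raw asm output, before `llvm_fixup_output` converts it back to the value's
/// Rust-level layout.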
fn llvm_fixup_output_type<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
                Primitive::F32 => cx.type_i32(),
                Primitive::F64 => cx.type_i64(),
                _ => layout.llvm_type(cx),
            }
        }
        _ => layout.llvm_type(cx),
    }
}