use crate::attributes;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug, ty::Instance};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
use smallvec::SmallVec;
use tracing::debug;
impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        instance: Instance<'_>,
        dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch) {
                            if let Some(feature) = feature {
                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
                                if self.tcx.sess.target_features.contains(&feature)
                                    || codegen_fn_attrs.target_features.contains(&feature)
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };

                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole set if one register is marked as
                        // clobbered. This is due to the way LLVM handles the
                        // FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class is
                        // disabled. This is necessary otherwise LLVM will try
                        // to actually allocate a register for the dummy output.
                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }

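        // As a rough illustration (not tied to any particular target): for
        // `asm!("...", out(reg) a, lateout(reg) b)` on x86-64 the loop above
        // would typically produce the constraints "=&r" and "=r", where `=`
        // marks an output and `&` marks it early-clobber so LLVM will not
        // reuse its register for an input.
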
        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }

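        // The tied-operand case above is worth spelling out: for `inout(reg) x`
        // the output half was already pushed in the first loop, so the input
        // half's constraint is just the decimal index of that output (e.g. "0"),
        // which is LLVM's syntax for tying an input to an output operand.
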
        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }

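        // At this point a source template like `asm!("mov {0}, {1}", out(reg) x,
        // in(reg) y)` has roughly become the LLVM template string "mov $0, $1",
        // with any literal `$` characters doubled to `$$` so LLVM does not treat
        // them as operand references.
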
        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Avr => {
                    constraints.push("~{sreg}".to_string());
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                InlineAsmArch::Bpf => {}
                InlineAsmArch::Msp430 => {
                    constraints.push("~{sr}".to_string());
                }
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                llvm::AsmDialect::Intel
            }
            _ => llvm::AsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
            options.contains(InlineAsmOptions::MAY_UNWIND),
            dest_catch_funclet,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));

        let mut attrs = SmallVec::<[_; 2]>::new();
        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
            } else if options.contains(InlineAsmOptions::READONLY) {
                attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
            }
            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
        } else if options.contains(InlineAsmOptions::NOMEM) {
            attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });

        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
        if let Some((dest, _, _)) = dest_catch_funclet {
            self.switch_to_block(dest);
        }

        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}

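// Taken together, codegen_inline_asm lowers an `asm!` invocation to an LLVM
// `call` (or `invoke`) of an inline-asm value. As a rough sketch, the IR for a
// single-output, single-input block on x86-64 looks something like:
//
//     %0 = call i32 asm sideeffect inteldialect
//              "add $0, $1", "=r,r,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %y)
//
// with the exact constraint string depending on the options and clobbers
// collected above.
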
impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef<'tcx>],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);

        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                        GlobalAsmOperandRef::SymFn { instance } => {
                            let llval = self.get_fn(instance);
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                        GlobalAsmOperandRef::SymStatic { def_id } => {
                            let llval = self
                                .renamed_statics
                                .borrow()
                                .get(&def_id)
                                .copied()
                                .unwrap_or_else(|| self.get_static(def_id));
                            self.add_compiler_used_global(llval);
                            let symbol = llvm::build_string(|s| unsafe {
                                llvm::LLVMRustGetMangledName(llval, s);
                            })
                            .expect("symbol is not valid UTF-8");
                            template_str.push_str(&symbol);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }

        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(
                self.llmod,
                template_str.as_ptr().cast(),
                template_str.len(),
            );
        }
    }
}

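// For reference: a source item such as `global_asm!(".globl foo\nfoo: ret")`
// simply becomes module-level assembly appended verbatim through
// LLVMRustAppendModuleInlineAsm; only `sym` and `const` operands are
// interpolated, and the `.intel_syntax`/`.att_syntax` wrapper above is added
// when Intel syntax is in effect on x86.
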
pub(crate) fn inline_asm_call<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: llvm::AsmDialect,
    line_spans: &[Span],
    unwind: bool,
    dest_catch_funclet: Option<(
        &'ll llvm::BasicBlock,
        &'ll llvm::BasicBlock,
        Option<&Funclet<'ll>>,
    )>,
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };
    let can_throw = if unwind { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys, output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            if unwind && llvm_util::get_version() < (13, 0, 0) {
                bx.cx.sess().span_fatal(
                    line_spans[0],
                    "unwinding from inline assembly is only supported on llvm >= 13.",
                );
            }
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                dia,
                can_throw,
            );
            let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
                bx.invoke(fty, v, inputs, dest, catch, funclet)
            } else {
                bx.call(fty, v, inputs, None)
            };

            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}

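// Roughly speaking, the `!srcloc` metadata attached above is what lets LLVM's
// inline-asm diagnostics point back at Rust code: when LLVM reports a problem
// on the n-th line of the assembly string, the n-th integer in the metadata
// node (the byte offset of that line's start) is decoded back into a `Span`.
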
/// If the register is an xmm/ymm/zmm register then return its index.
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}

/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}

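// For example, `xmm_reg_index` maps xmm5, ymm5 and zmm5 all to Some(5), and
// `a64_vreg_index` maps AArch64 v7 to Some(7); any other register yields None.
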
/// Converts a register or register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::X86(
                X86InlineAsmRegClass::x87_reg
                | X86InlineAsmRegClass::mmx_reg
                | X86InlineAsmRegClass::kreg0
                | X86InlineAsmRegClass::tmm_reg,
            ) => unreachable!("clobber-only"),
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}

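// A few concrete mappings, to make the constraint codes above less opaque: a
// generic x86 `reg` class becomes "r", an AArch64 `vreg` becomes "w", and an
// explicit register such as ymm3 used with a 32-byte layout becomes "{ymm3}",
// since LLVM wants the register name spelled to match the operand size.
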
/// Converts a modifier into LLVM's equivalent modifier.
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::PowerPC(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::Bpf(_) => None,
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
            Some('h') => Some('B'),
            Some('l') => Some('A'),
            _ => None,
        },
        InlineAsmRegClass::Avr(_) => None,
        InlineAsmRegClass::S390x(_) => None,
        InlineAsmRegClass::Msp430(_) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

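// As an illustration: on x86-64, `{0:e}` in Rust asm (the 32-bit view of a GP
// register) is translated to LLVM's 'k' modifier, and a bare `{0}` defaults to
// 'q' (the full 64-bit name); for the xmm/ymm/zmm classes the default
// modifiers are 'x', 't' and 'g' respectively.
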
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::X86(
            X86InlineAsmRegClass::x87_reg
            | X86InlineAsmRegClass::mmx_reg
            | X86InlineAsmRegClass::kreg0
            | X86InlineAsmRegClass::tmm_reg,
        ) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
    match scalar.primitive() {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}

/// Fix up an input value to work around LLVM bugs.
fn llvm_fixup_input<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.primitive() {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
                _ => value,
            }
        }
        _ => value,
    }
}

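// Example of the vreg fixup above: an `in(vreg)` operand of type `u8` on
// AArch64 is widened to an `<8 x i8>` vector with the byte inserted at lane 0,
// because LLVM rejects a bare i8 in a NEON register class; `llvm_fixup_output`
// performs the matching extract on the way back out.
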
/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.primitive() {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
                _ => value,
            }
        }
        _ => value,
    }
}

/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.primitive() == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
            match s.primitive() {
                // MIPS only supports register-length arithmetic.
                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
                Primitive::F32 => cx.type_i32(),
                Primitive::F64 => cx.type_i64(),
                _ => layout.llvm_type(cx),
            }
        }
        _ => layout.llvm_type(cx),
    }
}