use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::LlvmAsmDialect;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug, ty::Instance};
use rustc_span::{Pos, Span, Symbol};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_llvm_inline_asm(
        &mut self,
        ia: &hir::LlvmInlineAsmInner,
        outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
        mut inputs: Vec<&'ll Value>,
        span: Span,
    ) -> bool {
        let mut ext_constraints = vec![];
        let mut output_types = vec![];

        // Prepare the output operands
        let mut indirect_outputs = vec![];
        for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
            if out.is_rw {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    inputs.push(operand.immediate());
                }
                ext_constraints.push(i.to_string());
            }
            if out.is_indirect {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    indirect_outputs.push(operand.immediate());
                }
            } else {
                output_types.push(place.layout.llvm_type(self.cx));
            }
        }
        if !indirect_outputs.is_empty() {
            indirect_outputs.extend_from_slice(&inputs);
            inputs = indirect_outputs;
        }
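        // Indirect outputs are passed to the asm as pointer arguments; they
        // go ahead of the regular inputs so that the argument order matches
        // the constraint order (outputs first, then inputs).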
        let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));

        // Default per-arch clobbers
        // Basically what clang does
        let arch_clobbers = match &self.sess().target.arch[..] {
            "x86" | "x86_64" => &["~{dirflag}", "~{fpsr}", "~{flags}"][..],
            "mips" | "mips64" => &["~{$1}"],
            _ => &[],
        };

        let all_constraints = ia
            .outputs
            .iter()
            .map(|out| out.constraint.to_string())
            .chain(ia.inputs.iter().map(|s| s.to_string()))
            .chain(ext_constraints)
            .chain(clobbers)
            .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
            .collect::<Vec<String>>()
            .join(",");
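        // For example, `llvm_asm!("..." : "=r"(x) : "r"(y))` on x86_64 joins
        // to something like "=r,r,~{dirflag},~{fpsr},~{flags}".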
        debug!("Asm Constraints: {}", &all_constraints);

        // Depending on how many outputs we have, the return type is different
        let num_outputs = output_types.len();
        let output_type = match num_outputs {
            0 => self.type_void(),
            1 => output_types[0],
            _ => self.type_struct(&output_types, false),
        };

        let asm = ia.asm.as_str();
        let r = inline_asm_call(
            self,
            &asm,
            &all_constraints,
            &inputs,
            output_type,
            ia.volatile,
            ia.alignstack,
            ia.dialect,
            &[span],
        );
        if r.is_none() {
            return false;
        }
        let r = r.unwrap();

        // Again, based on how many outputs we have
        let outputs = ia.outputs.iter().zip(&outputs).filter(|&(o, _)| !o.is_indirect);
        for (i, (_, &place)) in outputs.enumerate() {
            let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
            OperandValue::Immediate(v).store(self, place);
        }

        true
    }
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        instance: Instance<'_>,
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch) {
                            if let Some(feature) = feature {
                                let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
                                let feature_name = Symbol::intern(feature);
                                if self.tcx.sess.target_features.contains(&feature_name)
                                    || codegen_fn_attrs.target_features.contains(&feature_name)
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };
                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole set if one register is marked as
                        // clobbered. This is due to the way LLVM handles the
                        // FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class is
                        // disabled. This is necessary otherwise LLVM will try
                        // to actually allocate a register for the dummy output.
                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
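                    // LLVM constraint prefixes: `=` marks a plain (late-written)
                    // output, while `=&` marks an early-clobber output whose
                    // register must not overlap any input register.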
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }
        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
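                    // A tied input uses a plain number as its constraint,
                    // e.g. "0" reuses whatever register LLVM assigned to
                    // output operand 0.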
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }
        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }
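        // At this point a Rust-level template like "mov {0}, {1}" has been
        // lowered to LLVM syntax such as "mov ${0}, ${1}" (or "mov ${0:k}, ..."
        // when a modifier applies), with literal `$` escaped as `$$`.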
        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
                InlineAsmArch::Bpf => {}
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                LlvmAsmDialect::Intel
            }
            _ => LlvmAsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
            } else if options.contains(InlineAsmOptions::READONLY) {
                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
            }
            llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
        } else if options.contains(InlineAsmOptions::NOMEM) {
            llvm::Attribute::InaccessibleMemOnly
                .apply_callsite(llvm::AttributePlace::Function, result);
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
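        // Note that `pure` asm is also emitted as non-volatile (see `volatile`
        // above), so LLVM may CSE it or delete it entirely when its results
        // are unused.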
        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}
impl AsmMethods for CodegenCx<'ll, 'tcx> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);
        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }
        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(
                self.llmod,
                template_str.as_ptr().cast(),
                template_str.len(),
            );
        }
    }
}
pub(crate) fn inline_asm_call(
    bx: &mut Builder<'a, 'll, 'tcx>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: LlvmAsmDialect,
    line_spans: &[Span],
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();
    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys[..], output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                llvm::AsmDialect::from_generic(dia),
            );
            let call = bx.call(fty, v, inputs, None);
            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}
/// If the register is an xmm/ymm/zmm register then return its index.
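/// For example, `xmm9`, `ymm9` and `zmm9` all map to `Some(9)`; the width
/// prefix is chosen separately by `reg_to_llvm` from the operand size.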
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}
/// If the register is an AArch64 vector register then return its index.
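/// For example, `v7` maps to `Some(7)`, which the caller renders as `{q7}`,
/// `{d7}`, etc. depending on the operand size.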
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}
/// Converts a register class to an LLVM constraint code.
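/// For example, the class `X86InlineAsmRegClass::reg` maps to `"r"`, while an
/// explicit register such as `xmm0` holding a 32-byte value maps to `"{ymm0}"`.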
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::X86(
                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
            ) => unreachable!("clobber-only"),
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}
/// Converts a modifier into LLVM's equivalent modifier.
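/// For example, on x86_64 a `reg` operand with no Rust modifier maps to
/// `Some('q')` (print the full 64-bit register name), and the Rust modifier
/// `e` maps to LLVM's `k` (the 32-bit subregister).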
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::PowerPC(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::Bpf(_) => None,
        InlineAsmRegClass::S390x(_) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
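/// For example, a pointer scalar on a 64-bit target is returned as `i64`
/// (via `type_isize`), and `F32` as LLVM's `float` type.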
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
    match scalar.value {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}
/// Fix up an input value to work around LLVM bugs.
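/// For example, an `i8` destined for an AArch64 `vreg` is widened to an
/// `<8 x i8>` vector via `insert_element` (see the first match arm below).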
fn llvm_fixup_input(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.value {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                    | ArmInlineAsmRegClass::dreg_low8
                    | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
            _ => value,
        },
        _ => value,
    }
}
/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.value {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                    | ArmInlineAsmRegClass::dreg_low8
                    | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
            _ => value,
        },
        _ => value,
    }
}
/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                    | ArmInlineAsmRegClass::dreg_low8
                    | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
            Primitive::F32 => cx.type_i32(),
            Primitive::F64 => cx.type_i64(),
            _ => layout.llvm_type(cx),
        },
        _ => layout.llvm_type(cx),
    }
}