2 use crate::back::write::to_llvm_code_model;
3 use crate::callee::get_fn;
4 use crate::coverageinfo;
8 use crate::type_::Type;
9 use crate::value::Value;
12 use rustc_codegen_ssa::base::wants_msvc_seh;
13 use rustc_codegen_ssa::traits::*;
14 use rustc_data_structures::base_n;
15 use rustc_data_structures::fx::FxHashMap;
16 use rustc_data_structures::small_c_str::SmallCStr;
17 use rustc_middle::mir::mono::CodegenUnit;
18 use rustc_middle::ty::layout::{
19 FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
22 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
23 use rustc_middle::{bug, span_bug};
24 use rustc_session::config::{CFGuard, CrateType, DebugInfo};
25 use rustc_session::Session;
26 use rustc_span::source_map::Span;
27 use rustc_span::symbol::Symbol;
28 use rustc_target::abi::{
29 call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
31 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
32 use smallvec::SmallVec;
34 use std::cell::{Cell, RefCell};
38 /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
39 /// `llvm::Context` so that several compilation units may be optimized in parallel.
40 /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
41 pub struct CodegenCx<'ll, 'tcx> {
42 pub tcx: TyCtxt<'tcx>,
43 pub check_overflow: bool,
44 pub use_dll_storage_attrs: bool,
45 pub tls_model: llvm::ThreadLocalMode,
47 pub llmod: &'ll llvm::Module,
48 pub llcx: &'ll llvm::Context,
49 pub codegen_unit: &'tcx CodegenUnit<'tcx>,
51 /// Cache instances of monomorphic and polymorphic items
52 pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
53 /// Cache generated vtables
55 RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
56 /// Cache of constant strings,
57 pub const_cstr_cache: RefCell<FxHashMap<Symbol, &'ll Value>>,
59 /// Reverse-direction for const ptrs cast from globals.
61 /// Key is a Value holding a `*T`,
62 /// Val is a Value holding a `*[T]`.
64 /// Needed because LLVM loses pointer->pointee association
65 /// when we ptrcast, and we have to ptrcast during codegen
66 /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
67 /// a pointer to an LLVM array type. Similar for trait objects.
68 pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
70 /// Cache of emitted const globals (value -> global)
71 pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
73 /// List of globals for static variables which need to be passed to the
74 /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
75 /// (We have to make sure we don't invalidate any Values referring
77 pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
79 /// Statics that will be placed in the llvm.used variable
80 /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
81 pub used_statics: RefCell<Vec<&'ll Value>>,
83 /// Statics that will be placed in the llvm.compiler.used variable
84 /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
85 pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
87 /// Mapping of non-scalar types to llvm types and field remapping if needed.
88 pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
90 /// Mapping of scalar types to llvm types.
91 pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
93 pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
94 pub isize_ty: &'ll Type,
96 pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
97 pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
99 eh_personality: Cell<Option<&'ll Value>>,
100 eh_catch_typeinfo: Cell<Option<&'ll Value>>,
101 pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
103 intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
105 /// A counter that is used for generating local symbol names
106 local_gen_sym_counter: Cell<usize>,
109 pub struct TypeLowering<'ll> {
110 /// Associated LLVM type
111 pub lltype: &'ll Type,
113 /// If padding is used the slice maps fields from source order
115 pub field_remapping: Option<SmallVec<[u32; 4]>>,
118 fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
120 TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
121 TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
122 TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
123 TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
/// Creates a fresh LLVM module for one codegen unit and configures it from
/// the session: data layout, normalized target triple, PIC/PIE level, code
/// model, PLT avoidance, and Control Flow Guard module flags.
///
/// NOTE(review): this excerpt elides several lines of the original function
/// (the remaining parameters such as `tcx` and `mod_name`, `let sess = ...;`,
/// a number of closing braces, the `bug!(` opener, and the final `llmod`
/// return), so braces below do not balance as shown.
pub unsafe fn create_module(
    llcx: &'ll llvm::Context,
) -> &'ll llvm::Module {
    let mod_name = SmallCStr::new(mod_name);
    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);

    // Start from the data-layout string in the target spec and patch it for
    // older LLVM versions that reject newer components.
    let mut target_data_layout = sess.target.data_layout.clone();
    // LLVM < 12 does not accept the v256/v512 vector entries on powerpc64.
    if llvm_util::get_version() < (12, 0, 0) && sess.target.arch == "powerpc64" {
        target_data_layout = target_data_layout.replace("-v256:256:256-v512:512:512", "");
    // LLVM < 13 needs further adjustments on powerpc64 and uses older wasm layouts.
    if llvm_util::get_version() < (13, 0, 0) {
        if sess.target.arch == "powerpc64" {
            target_data_layout = target_data_layout.replace("-S128", "");
        if sess.target.arch == "wasm32" {
            target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string();
        if sess.target.arch == "wasm64" {
            target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string();

    // Ensure the data-layout values hardcoded remain the defaults.
    if sess.target.is_builtin {
        // Ask a throwaway target machine for LLVM's default layout string,
        // then compare it against ours below.
        let tm = crate::back::write::create_informational_target_machine(tcx.sess);
        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
        llvm::LLVMRustDisposeTargetMachine(tm);

        let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
        let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
            .expect("got a non-UTF8 data-layout from LLVM");

        // Unfortunately LLVM target specs change over time, and right now we
        // don't have proper support to work with any more than one
        // `data_layout` than the one that is in the rust-lang/rust repo. If
        // this compiler is configured against a custom LLVM, we may have a
        // differing data layout, even though we should update our own to use
        // that layout.
        //
        // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
        // disable this check entirely as we may be configured with something
        // that has a different target layout.
        //
        // Unsure if this will actually cause breakage when rustc is configured
        // against a custom LLVM in practice.
        let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
        let custom_llvm_used = cfg_llvm_root.trim() != "";

        if !custom_llvm_used && target_data_layout != llvm_data_layout {
            // NOTE(review): the `bug!(` opener for this ICE is elided here.
            "data-layout for target `{rustc_target}`, `{rustc_layout}`, \
            differs from LLVM target's `{llvm_target}` default layout, `{llvm_layout}`",
            rustc_target = sess.opts.target_triple,
            rustc_layout = target_data_layout,
            llvm_target = sess.target.llvm_target,
            llvm_layout = llvm_data_layout

    // Apply the (possibly patched) data layout and normalized triple.
    let data_layout = SmallCStr::new(&target_data_layout);
    llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());

    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());

    if sess.relocation_model() == RelocModel::Pic {
        llvm::LLVMRustSetModulePICLevel(llmod);
        // PIE is potentially more effective than PIC, but can only be used in executables.
        // If all our outputs are executables, then we can relax PIC to PIE.
        if sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) {
            llvm::LLVMRustSetModulePIELevel(llmod);

    // Linking object files with different code models is undefined behavior
    // because the compiler would have to generate additional code (to span
    // longer jumps) if a larger code model is used with a smaller one.
    //
    // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
    llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));

    // If skipping the PLT is enabled, we need to add some module metadata
    // to ensure intrinsic calls don't use it.
    if !sess.needs_plt() {
        let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1);

    // Control Flow Guard is currently only supported by the MSVC linker on Windows.
    if sess.target.is_like_msvc {
        match sess.opts.cg.control_flow_guard {
            CFGuard::Disabled => {}
            CFGuard::NoChecks => {
                // Set `cfguard=1` module flag to emit metadata only.
                llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 1)
                // NOTE(review): the `CFGuard::Checks =>` arm opener is elided here.
                // Set `cfguard=2` module flag to emit metadata and checks.
                llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 2)
impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    // NOTE(review): the `pub fn new(tcx: TyCtxt<'tcx>, ...)` signature opener
    // is elided from this excerpt; the two lines below are its remaining
    // parameters, followed by the constructor body.
        codegen_unit: &'tcx CodegenUnit<'tcx>,
        llvm_module: &'ll crate::ModuleLlvm,
        // An interesting part of Windows which MSVC forces our hand on (and
        // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
        // attributes in LLVM IR as well as native dependencies (in C these
        // correspond to `__declspec(dllimport)`).
        //
        // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
        // relying on that can result in issues like #50176.
        // LLD won't support that and expects symbols with proper attributes.
        // Because of that we make MinGW target emit dllexport just like MSVC.
        // When it comes to dllimport we use it for constants but for functions
        // rely on the linker to do the right thing. Opposed to dllexport this
        // task is easy for them (both LD and LLD) and allows us to easily use
        // symbols from static libraries in shared libraries.
        //
        // Whenever a dynamic library is built on Windows it must have its public
        // interface specified by functions tagged with `dllexport` or otherwise
        // they're not available to be linked against. This poses a few problems
        // for the compiler, some of which are somewhat fundamental, but we use
        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
        // attribute to all LLVM functions that are exported e.g., they're
        // already tagged with external linkage). This is suboptimal for a few
        // reasons:
        //
        // * If an object file will never be included in a dynamic library,
        //   there's no need to attach the dllexport attribute. Most object
        //   files in Rust are not destined to become part of a dll as binaries
        //   are statically linked by default.
        // * If the compiler is emitting both an rlib and a dylib, the same
        //   source object file is currently used but with MSVC this may be less
        //   feasible. The compiler may be able to get around this, but it may
        //   involve some invasive changes to deal with this.
        //
        // The flipside of this situation is that whenever you link to a dll and
        // you import a function from it, the import should be tagged with
        // `dllimport`. At this time, however, the compiler does not emit
        // `dllimport` for any declarations other than constants (where it is
        // required), which is again suboptimal for even more reasons!
        //
        // * Calling a function imported from another dll without using
        //   `dllimport` causes the linker/compiler to have extra overhead (one
        //   `jmp` instruction on x86) when calling the function.
        // * The same object file may be used in different circumstances, so a
        //   function may be imported from a dll if the object is linked into a
        //   dll, but it may be just linked against if linked into an rlib.
        // * The compiler has no knowledge about whether native functions should
        //   be tagged dllimport or not.
        //
        // For now the compiler takes the perf hit (I do not have any numbers to
        // this effect) by marking very little as `dllimport` and praying the
        // linker will take care of everything. Fixing this problem will likely
        // require adding a few attributes to Rust itself (feature gated at the
        // start) and then strongly recommending static linkage on Windows!
        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;

        let check_overflow = tcx.sess.overflow_checks();

        let tls_model = to_llvm_tls_model(tcx.sess.tls_model());

        let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());

        // Coverage context exists only under `-C instrument-coverage`.
        let coverage_cx = if tcx.sess.instrument_coverage() {
            let covctx = coverageinfo::CrateCoverageContext::new();

        // Debuginfo context exists only when debuginfo was requested; it also
        // emits the compile-unit metadata up front.
        let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
            let dctx = debuginfo::CrateDebugContext::new(llmod);
            debuginfo::metadata::compile_unit_metadata(tcx, &codegen_unit.name().as_str(), &dctx);

        // Integer type matching the target pointer width, used for `isize`/`usize`.
        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());

        // NOTE(review): the opener of the `CodegenCx { ... }` struct literal
        // (and several of its fields) are elided from this excerpt.
            use_dll_storage_attrs,
            instances: Default::default(),
            vtables: Default::default(),
            const_cstr_cache: Default::default(),
            const_unsized: Default::default(),
            const_globals: Default::default(),
            statics_to_rauw: RefCell::new(Vec::new()),
            used_statics: RefCell::new(Vec::new()),
            compiler_used_statics: RefCell::new(Vec::new()),
            type_lowering: Default::default(),
            scalar_lltypes: Default::default(),
            pointee_infos: Default::default(),
            eh_personality: Cell::new(None),
            eh_catch_typeinfo: Cell::new(None),
            rust_try_fn: Cell::new(None),
            intrinsics: Default::default(),
            local_gen_sym_counter: Cell::new(0),

    /// Accessor for the RAUW list accumulated while codegenning statics.
    crate fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
        &self.statics_to_rauw

    /// Coverage context accessor; `None` unless `-C instrument-coverage` was on.
    pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
        self.coverage_cx.as_ref()

    /// Shared implementation for emitting the `llvm.used` /
    /// `llvm.compiler.used` appending globals: builds an `i8*` array from
    /// `values` and places it in the `llvm.metadata` section.
    fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
        let section = cstr!("llvm.metadata");
        let array = self.const_array(&self.type_ptr_to(self.type_i8()), values);

        // Raw LLVM-C calls (in the original these sit in an `unsafe` block
        // whose opener is elided from this excerpt).
        let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
        llvm::LLVMSetInitializer(g, array);
        // Appending linkage merges arrays from multiple modules at link time.
        llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
        llvm::LLVMSetSection(g, section.as_ptr());
impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    // NOTE(review): the `fn vtables(&self` signature opener and the method
    // body (which returns `&self.vtables`) are elided from this excerpt;
    // this is the return type line.
    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>

    /// Returns the LLVM function value for `instance` (declaring it on demand
    /// via the `callee::get_fn` helper).
    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)

    /// Returns the address of `instance`; delegates to the same helper as
    /// `get_fn`.
    fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)

    fn eh_personality(&self) -> &'ll Value {
        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to codegen that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        //
        // Note that MSVC is a little special here in that we don't use the
        // `eh_personality` lang item at all. Currently LLVM has support for
        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
        // *name of the personality function* to decide what kind of unwind side
        // tables/landing pads to emit. It looks like Dwarf is used by default,
        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
        // an "exception", but for MSVC we want to force SEH. This means that we
        // can't actually have the personality function be our standard
        // `rust_eh_personality` function, but rather we wired it up to the
        // CRT's custom personality function, which forces LLVM to consider
        // landing pads as "landing pads for SEH".
        //
        // NOTE(review): several lines of this method (the `tcx` binding, match
        // arms, string literals and closing braces) are elided from this excerpt.
        if let Some(llpersonality) = self.eh_personality.get() {
            return llpersonality;
        let llfn = match tcx.lang_items().eh_personality() {
            // Non-MSVC target with the lang item available: resolve and use it.
            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
                ty::Instance::resolve(
                    ty::ParamEnv::reveal_all(),
                    tcx.intern_substs(&[]),
        // Otherwise declare (or reuse) a C-ABI personality symbol by name.
        let name = if wants_msvc_seh(self.sess()) {
            "rust_eh_personality"
        if let Some(llfn) = self.get_declared_value(name) {
            let fty = self.type_variadic_func(&[], self.type_i32());
            let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
            attributes::apply_target_cpu_attr(self, llfn);
        // Cache the result for subsequent calls.
        self.eh_personality.set(Some(llfn));

    // Simple forwarding accessors (bodies largely elided in this excerpt).
    fn sess(&self) -> &Session {
    fn check_overflow(&self) -> bool {
    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
    fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
    fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
        &self.compiler_used_statics

    /// Applies the frame-pointer attribute to `llfn` per session settings.
    fn set_frame_pointer_type(&self, llfn: &'ll Value) {
        attributes::set_frame_pointer_type(self, llfn)

    /// Applies both target-cpu and tune-cpu attributes to `llfn`.
    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
        attributes::apply_target_cpu_attr(self, llfn);
        attributes::apply_tune_cpu_attr(self, llfn);

    /// Materializes the `llvm.used` global from the accumulated statics.
    fn create_used_variable(&self) {
        self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());

    /// Materializes the `llvm.compiler.used` global from the accumulated statics.
    fn create_compiler_used_variable(&self) {
        self.create_used_variable_impl(
            cstr!("llvm.compiler.used"),
            &*self.compiler_used_statics.borrow(),

    /// Declares the C entry point `main`, or returns `None` if the symbol
    /// already exists (which is a user error, see below).
    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
        if self.get_declared_value("main").is_none() {
            Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
            // If the symbol already exists, it is an error: for example, the user wrote
            // #[no_mangle] extern "C" fn main(..) {..}
            // instead of #[start]
impl CodegenCx<'b, 'tcx> {
    /// Looks up an LLVM intrinsic by name, declaring it on first use via
    /// `declare_intrinsic`. ICEs if `key` is not a known intrinsic name.
    /// (The `return`/closing lines of the cache hit path are elided here.)
    crate fn get_intrinsic(&self, key: &str) -> (&'b Type, &'b Value) {
        // Fast path: already declared and cached.
        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
    // NOTE(review): the `fn insert_intrinsic(&self, name: ..., ` opener (and
    // the `ret` parameter) are elided from this excerpt; the lines below are
    // its remaining signature and body. It declares the named C-ABI function
    // and caches it in `self.intrinsics`.
        args: Option<&[&'b llvm::Type]>,
    ) -> (&'b llvm::Type, &'b llvm::Value) {
        // `Some(args)` is a fixed-arity signature; `None` means variadic with
        // no fixed parameters.
        let fn_ty = if let Some(args) = args {
            self.type_func(args, ret)
            self.type_variadic_func(&[], ret)
        let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
        self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
    /// Declares the LLVM intrinsic named `key` if it is one of the known
    /// intrinsics listed below, caching it via `insert_intrinsic`.
    /// The final `None` return for unknown names is elided from this excerpt.
    fn declare_intrinsic(&self, key: &str) -> Option<(&'b Type, &'b Value)> {
        // `ifn!` declares-and-returns when `key` matches the given name.
        // Three arms: nullary, variadic (`...`), and fixed-arity.
        // NOTE(review): the `macro_rules! ifn {` opener, the `if key == $name`
        // guards, and the arm/brace closers are elided from this excerpt.
            ($name:expr, fn() -> $ret:expr) => (
                    return Some(self.insert_intrinsic($name, Some(&[]), $ret));
            ($name:expr, fn(...) -> $ret:expr) => (
                    return Some(self.insert_intrinsic($name, None, $ret));
            ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
                    return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));

        // Helper for intrinsics returning an aggregate (e.g. value + overflow flag).
        macro_rules! mk_struct {
            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))

        // Shorthands for the LLVM types used in the signatures below.
        let i8p = self.type_i8p();
        let void = self.type_void();
        let i1 = self.type_i1();
        let t_i8 = self.type_i8();
        let t_i16 = self.type_i16();
        let t_i32 = self.type_i32();
        let t_i64 = self.type_i64();
        let t_i128 = self.type_i128();
        let t_isize = self.type_isize();
        let t_f32 = self.type_f32();
        let t_f64 = self.type_f64();

        // WebAssembly saturating float-to-int truncation.
        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);

        // Target-independent saturating float-to-signed-int conversions.
        ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);

        // Saturating float-to-unsigned-int conversions.
        ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);

        // Traps, frame address, and optimizer side-effect marker.
        ifn!("llvm.trap", fn() -> void);
        ifn!("llvm.debugtrap", fn() -> void);
        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
        ifn!("llvm.sideeffect", fn() -> void);

        // Floating-point math intrinsics.
        ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
        ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);

        ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);

        ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
        ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);

        ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);

        ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.round.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);

        // Integer bit-manipulation intrinsics.
        ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);

        // The trailing `i1` selects whether a zero input is poison.
        ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);

        // Funnel shifts (used for rotate left/right).
        ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        // Checked arithmetic: each returns `{ value, overflow-flag }`.
        ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        // Saturating arithmetic.
        ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        // Lifetime markers for stack slots.
        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);

        // Branch hints and exception-handling support.
        ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
        ifn!("llvm.localescape", fn(...) -> void);
        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);

        ifn!("llvm.assume", fn(i1) -> void);
        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);

        // This isn't an "LLVM intrinsic", but LLVM's optimization passes
        // recognize it like one and we assume it exists in `core::slice::cmp`
        ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32);

        // variadic intrinsics
        ifn!("llvm.va_start", fn(i8p) -> void);
        ifn!("llvm.va_end", fn(i8p) -> void);
        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);

        // Coverage counters, only needed under `-C instrument-coverage`.
        if self.sess().instrument_coverage() {
            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);

        // Debuginfo intrinsics, only needed when debuginfo is emitted.
        if self.sess().opts.debuginfo != DebugInfo::None {
            ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
            ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
    /// Returns (creating and caching on first use) the typeinfo value used
    /// for catching exceptions; only reachable on Emscripten-like targets
    /// (see the `assert!` below). The result is always an `i8*` bitcast.
    crate fn eh_catch_typeinfo(&self) -> &'b Value {
        // Fast path: previously computed and cached.
        if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
            return eh_catch_typeinfo;
        assert!(self.sess().target.is_like_emscripten);
        // Use the `eh_catch_typeinfo` lang item's static if the crate graph
        // provides one; otherwise declare the well-known global ourselves.
        // NOTE(review): the `tcx` binding, the `None =>` arm opener and
        // several closing braces are elided from this excerpt.
        let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
            Some(def_id) => self.get_static(def_id),
                .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
                self.declare_global("rust_eh_catch_typeinfo", ty)
        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
        // Cache for subsequent calls.
        self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
806 impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
807 /// Generates a new symbol name with the given prefix. This symbol name must
808 /// only be used for definitions with `internal` or `private` linkage.
809 pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
810 let idx = self.local_gen_sym_counter.get();
811 self.local_gen_sym_counter.set(idx + 1);
812 // Include a '.' character, so there can be no accidental conflicts with
813 // user defined names
814 let mut name = String::with_capacity(prefix.len() + 6);
815 name.push_str(prefix);
817 base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
822 impl HasDataLayout for CodegenCx<'ll, 'tcx> {
824 fn data_layout(&self) -> &TargetDataLayout {
825 &self.tcx.data_layout
829 impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
831 fn target_spec(&self) -> &Target {
832 &self.tcx.sess.target
836 impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
838 fn tcx(&self) -> TyCtxt<'tcx> {
843 impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
844 fn param_env(&self) -> ty::ParamEnv<'tcx> {
845 ty::ParamEnv::reveal_all()
849 impl LayoutOfHelpers<'tcx> for CodegenCx<'ll, 'tcx> {
850 type LayoutOfResult = TyAndLayout<'tcx>;
853 fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
854 if let LayoutError::SizeOverflow(_) = err {
855 self.sess().span_fatal(span, &err.to_string())
857 span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
impl FnAbiOfHelpers<'tcx> for CodegenCx<'ll, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    /// Fn-ABI computation failures: a layout size overflow is reported as a
    /// fatal error; any other failure is a compiler bug, reported with the
    /// request that triggered it. (This definition continues past the end of
    /// the visible excerpt; the `&self`/`span` parameters and the `span_bug!`
    /// openers are elided.)
    fn handle_fn_abi_err(
        err: FnAbiError<'tcx>,
        fn_abi_request: FnAbiRequest<'tcx>,
        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
            self.sess().span_fatal(span, &err.to_string())
            match fn_abi_request {
                FnAbiRequest::OfFnPtr { sig, extra_args } => {
                        "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
                FnAbiRequest::OfInstance { instance, extra_args } => {
                        "`fn_abi_of_instance({}, {:?})` failed: {}",