use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};

use rustc_data_structures::sync::par_iter;
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_hir::weak_lang_items::WEAK_ITEMS_SYMBOLS;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::Symbol;
use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
use rustc_target::abi::{Align, VariantIdx};

use std::collections::BTreeSet;
use std::convert::TryFrom;
use std::time::{Duration, Instant};

use itertools::Itertools;

pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
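        // The ordering operators depend on signedness: signed integers lower
        // to the `s*` LLVM predicates (e.g. `slt`), unsigned integers to the
        // `u*` ones (e.g. `ult`).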
        hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
        hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
        hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
        hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
        op => bug!(
            "comparison_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}

pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
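        // Note that `Ne` maps to the *unordered* predicate above (true if
        // either operand is NaN), so `x != y` holds for NaN inputs, matching
        // Rust float semantics; the remaining predicates are ordered and are
        // false when either operand is NaN.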
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
99 "comparison_op_to_fcmp_predicate: expected comparison operator, \

pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, lhs: Bx::Value, rhs: Bx::Value, t: Ty<'tcx>, ret_ty: Bx::Type, op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind() {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}

/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
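///
/// For example (an illustration, not an exhaustive list): unsizing
/// `&[u8; 16]` to `&[u8]` produces the constant length `16`, while unsizing
/// `&T` to `&dyn Trait` produces a pointer to the trait's vtable for `T`.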
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> Bx::Value {
    let cx = bx.cx();
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
    match (source.kind(), target.kind()) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
            let old_info =
                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
            if data_a.principal_def_id() == data_b.principal_def_id() {
                // A NOP cast that doesn't actually change anything, should be allowed even with invalid vtables.
                return old_info;
            }

            // trait upcasting coercion

            let vptr_entry_idx =
                cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));

            if let Some(entry_idx) = vptr_entry_idx {
                let ptr_ty = cx.type_i8p();
                let ptr_align = cx.tcx().data_layout.pointer_align.abi;
                let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
                    cx.layout_of(cx.tcx().mk_mut_ptr(target)),
                    1,
                    true,
                );
                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                let gep = bx.inbounds_gep(
                    ptr_ty,
                    llvtable,
                    &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                );
                let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                bx.nonnull_metadata(new_vptr);
                // VTable loads are invariant.
                bx.set_invariant_load(new_vptr);
                bx.pointercast(new_vptr, vtable_ptr_ty)
            } else {
                old_info
            }
        }
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
                cx.layout_of(cx.tcx().mk_mut_ptr(target)),
                1,
                true,
            );
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}

/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
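/// (For instance, `Box<[i32; 4]>` to `Box<[i32]>`: the returned pair is the
/// cast data pointer plus the extra info computed by `unsized_info`.)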
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, src: Bx::Value, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx>, old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b); // implies same number of fields
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {
                return (src, old_info.unwrap());
            }
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                if src_f.is_zst() {
                    continue;
                }

                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
            }
            let (lldata, llextra) = result.unwrap();
            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
        }
        _ => bug!("unsize_ptr: called on bad types"),
    }
}

/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
                OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b); // implies same number of fields

            for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align, src_f.layout, MemFlags::empty());
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}

pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, op: hir::BinOpKind, lhs: Bx::Value, rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}

fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, op: hir::BinOpKind, lhs: Bx::Value, rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have any size int on the rhs
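    // but LLVM requires both shift operands to have the same width, so the
    // rhs is truncated or zero-extended to the width of the lhs below.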
    if op.is_shift() {
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        }
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        }
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
        if lhs_sz < rhs_sz {
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
            bx.zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.is_like_msvc
}

pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx, dst: Bx::Value, dst_align: Align, src: Bx::Value, src_align: Align,
    layout: TyAndLayout<'tcx>, flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}

pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // release builds.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}

/// Creates the `main` function which will initialize the rust runtime and call
/// the user's main function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
    let main_is_local = main_def_id.is_local();
    let instance = Instance::mono(cx.tcx(), main_def_id);

    if main_is_local {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
            return None;
        }
    } else if !cx.codegen_unit().is_primary() {
        // We want to create the wrapper only when the codegen unit is the primary one
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, entry_type);
    return Some(entry_fn);

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        entry_type: EntryFnType,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments,
        // then its return type cannot have
        // late-bound regions, since late-bound
        // regions must appear in the argument
        // listing.
        let main_ret_ty = cx.tcx().normalize_erasing_regions(
            ty::ParamEnv::reveal_all(),
            main_ret_ty.no_bound_vars().unwrap(),
        );

        let Some(llfn) = cx.declare_c_main(llfty) else {
            // FIXME: We should be smart and show a better diagnostic here.
            let span = cx.tcx().def_span(rust_main_def_id);
            cx.sess()
                .struct_span_err(span, "entry symbol `main` declared multiple times")
                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        };

        // `main` should respect same config for frame pointer elimination as rest of code
        cx.set_frame_pointer_type(llfn);
        cx.apply_target_cpu_attr(llfn);

        let llbb = Bx::append_block(&cx, llfn, "top");
        let mut bx = Bx::build(&cx, llbb);

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let isize_ty = cx.type_isize();
        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
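
        // For `EntryFnType::Main` we call the `start` lang item (in std this
        // is `std::rt::lang_start`), whose signature the `start_ty` built
        // below mirrors: (main fn ptr, argc, argv, sigpipe) -> isize. For a
        // user-defined `#[start]` fn, `main` itself is called with (argc, argv).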
        let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );

            let i8_ty = cx.type_i8();
            let arg_sigpipe = bx.const_u8(sigpipe);

            let start_ty =
                cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
            (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
472 debug!("using user-defined start fn");
473 let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
474 (rust_main, start_ty, vec![arg_argc, arg_argv])
477 let result = bx.call(start_ty, None, start_fn, &args, None);
478 let cast = bx.intcast(result, cx.type_int(), true);

/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.main_needs_argc_argv {
        // Params from native `main()` used as args for rust start function
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}

/// This function returns all of the debugger visualizers specified for the
/// current crate as well as all upstream crates transitively that match the
/// `visualizer_type` specified.
pub fn collect_debugger_visualizers_transitive(
    tcx: TyCtxt<'_>,
    visualizer_type: DebuggerVisualizerType,
) -> BTreeSet<DebuggerVisualizerFile> {
    tcx.debugger_visualizers(LOCAL_CRATE)
        .iter()
        .chain(
            tcx.crates(())
                .iter()
                .filter(|&cnum| {
                    let used_crate_source = tcx.used_crate_source(*cnum);
                    used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
                })
                .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
        )
        .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
        .cloned()
        .collect::<BTreeSet<_>>()
}

pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);

        ongoing_codegen.codegen_finished(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(()).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let metadata_module = if need_metadata_module {
        // Emit compressed metadata object.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        tcx.sess.time("write_compressed_metadata", || {
            let file_name =
                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
            let data = create_compressed_metadata_file(
                tcx.sess,
                &metadata,
                &exported_symbols::metadata_symbol_name(tcx),
            );
            if let Err(err) = std::fs::write(&file_name, data) {
                tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
            }
            Some(CompiledModule {
                name: metadata_cgu_name,
                kind: ModuleKind::Metadata,
                object: Some(file_name),
                dwarf_object: None,
                bytecode: None,
            })
        })
    } else {
        None
    };

    let ongoing_codegen = start_async_codegen(
        backend.clone(), tcx, target_cpu, metadata, metadata_module, codegen_units.len(),
    );

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind(()) {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let module_llvm = tcx.sess.time("write_allocator_module", || {
            backend.codegen_allocator(tcx, &llmod_id, kind, tcx.lang_items().oom().is_some())
        });

        Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

    // For better throughput during parallel processing by LLVM, we used to sort
    // CGUs largest to smallest. This would lead to better thread utilization
    // by, for example, preventing a large CGU from being processed last and
    // having only one LLVM thread working while the rest remained idle.
    //
    // However, this strategy would lead to high memory usage, as it meant the
    // LLVM-IR for all of the largest CGUs would be resident in memory at once.
    //
    // Instead, we can compromise by ordering CGUs such that the largest and
    // smallest are first, second largest and smallest are next, etc. If there
    // are large size variations, this can reduce memory usage significantly.
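    //
    // For example, CGUs with size estimates [1, 2, 3, 4, 5, 6] come out of the
    // interleaving below in the order [6, 1, 5, 2, 4, 3].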
    let codegen_units: Vec<_> = {
        let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
        sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());

        let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
        second_half.iter().rev().interleave(first_half).copied().collect()
    };

    // Calculate the CGU reuse
    let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
        codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
    });

    let mut total_codegen_time = Duration::new(0, 0);
    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen thread to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OnGoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
        tcx.sess.time("compile_first_CGU_batch", || {
            // Try to find one CGU to compile per thread.
            let cgus: Vec<_> = cgu_reuse
                .iter()
                .enumerate()
                .filter(|&(_, reuse)| reuse == &CguReuse::No)
                .take(tcx.sess.threads())
                .collect();

            // Compile the found CGUs in parallel.
            let start_time = Instant::now();

            let pre_compiled_cgus = par_iter(cgus)
                .map(|(i, _)| {
                    let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                    (i, module)
                })
                .collect();

            total_codegen_time += start_time.elapsed();

            pre_compiled_cgus
        })
    } else {
        FxHashMap::default()
    };

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
                    cgu
                } else {
                    let start_time = Instant::now();
                    let module = backend.compile_codegen_unit(tcx, cgu.name());
                    total_codegen_time += start_time.elapsed();
                    (module, cgu.size_estimate())
                };
                // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                // compilation hang on post-monomorphization errors.
                tcx.sess.abort_if_errors();

                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator.sender,
                    module,
                    cost,
                );
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    tcx,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
        }
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of the -Ztime-passes output manually.
    if tcx.sess.time_passes() {
        let end_rss = get_resident_set_size();

        print_time_passes_entry(
            "codegen_to_LLVM_IR",
            total_codegen_time,
            start_rss.unwrap(),
            end_rss,
        );
    }

    ongoing_codegen.check_for_errors(tcx.sess);

    ongoing_codegen
}

impl CrateInfo {
    pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
        let exported_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
            .collect();
        let linked_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
            .collect();
        let local_crate_name = tcx.crate_name(LOCAL_CRATE);
        let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
        let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
        let windows_subsystem = subsystem.map(|subsystem| {
            if subsystem != sym::windows && subsystem != sym::console {
                tcx.sess.fatal(&format!(
                    "invalid windows subsystem `{}`, only \
                     `windows` and `console` are allowed",
                    subsystem
                ));
            }
            subsystem.to_string()
        });

        // This list is used when generating the command line to pass through to
        // system linker. The linker expects undefined symbols on the left of the
        // command line to be defined in libraries on the right, not the other way
        // around. For more info, see some comments in the add_used_library function
        // below.
        //
        // In order to get this left-to-right dependency ordering, we use the reverse
        // postorder of all crates putting the leaves at the right-most positions.
        let used_crates = tcx
            .postorder_cnums(())
            .iter()
            .rev()
            .copied()
            .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
            .collect();

        let mut info = CrateInfo {
            target_cpu,
            exported_symbols,
            linked_symbols,
            local_crate_name,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
            crate_name: Default::default(),
            used_crates,
            used_crate_source: Default::default(),
            dependency_formats: tcx.dependency_formats(()).clone(),
            windows_subsystem,
            natvis_debugger_visualizers: Default::default(),
        };
        let crates = tcx.crates(());

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries
                .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
            info.crate_name.insert(cnum, tcx.crate_name(cnum));

            let used_crate_source = tcx.used_crate_source(cnum);
            info.used_crate_source.insert(cnum, used_crate_source.clone());
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
        }

        // Handle circular dependencies in the standard library.
        // See comment before `add_linked_symbol_object` function for the details.
        // With msvc-like linkers it's both unnecessary (they support circular dependencies),
        // and causes linking issues (when weak lang item symbols are "privatized" by LTO).
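        // (The weak lang item symbols involved are names like `rust_eh_personality`.)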
        let target = &tcx.sess.target;
        if !target.is_like_msvc {
            let missing_weak_lang_items: FxHashSet<&Symbol> = info
                .used_crates
                .iter()
                .flat_map(|cnum| {
                    tcx.missing_lang_items(*cnum)
                        .iter()
                        .filter(|l| lang_items::required(tcx, **l))
                        .filter_map(|item| WEAK_ITEMS_SYMBOLS.get(item))
                })
                .collect();
            let prefix = if target.is_like_windows && target.arch == "x86" { "_" } else { "" };
            info.linked_symbols
                .iter_mut()
                .filter(|(crate_type, _)| {
                    !matches!(crate_type, CrateType::Rlib | CrateType::Staticlib)
                })
                .for_each(|(_, linked_symbols)| {
                    linked_symbols.extend(
                        missing_weak_lang_items
                            .iter()
                            .map(|item| (format!("{prefix}{item}"), SymbolExportKind::Text)),
                    )
                });
        }

        let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
            CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
                // These are crate types for which we invoke the linker and can embed
                // NatVis visualizers.
                true
            }
            CrateType::ProcMacro => {
                // We could embed NatVis for proc macro crates too (to improve the debugging
                // experience for them) but it does not seem like a good default, since
                // this is a rare use case and we don't want to slow down the common case.
                false
            }
            CrateType::Staticlib | CrateType::Rlib => {
                // We don't invoke the linker for these, so we don't need to collect the NatVis for them.
                false
            }
        });

        if target.is_like_msvc && embed_visualizers {
            info.natvis_debugger_visualizers =
                collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
        }

        info
    }
}

pub fn provide(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead (if optimize(size)
            // attributes are present).
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };
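
        // If any function in the crate carries `#[optimize(speed)]`, bump the
        // backend level to `for_speed` (e.g. `-Copt-level=s` plus one such
        // function yields `OptLevel::Default`); otherwise keep the session's
        // own level.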
        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        tcx.sess.opts.optimize
    };
}

fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
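    //
    // For example, under (Thin)LTO a green CGU is reused as pre-LTO bitcode
    // (`CguReuse::PreLto`); with LTO disabled it is reused post-LTO
    // (`CguReuse::PostLto`), as computed below.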
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.try_mark_green(&dep_node) {
        // We can re-use either the pre- or the post-thinlto state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}