use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::par_iter;
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
use rustc_target::abi::{Align, VariantIdx};

use std::collections::BTreeSet;
use std::convert::TryFrom;
use std::time::{Duration, Instant};

use itertools::Itertools;
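
/// Maps a HIR binary comparison operator to the corresponding LLVM-style
/// integer predicate, picking the signed or unsigned variant as requested.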
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => {
            if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }
        }
        hir::BinOpKind::Le => {
            if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }
        }
        hir::BinOpKind::Gt => {
            if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }
        }
        hir::BinOpKind::Ge => {
            if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }
        }
        op => bug!(
            "comparison_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
        op => bug!(
            "comparison_op_to_fcmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}
pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind() {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}

/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> Bx::Value {
    let cx = bx.cx();
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
    match (source.kind(), target.kind()) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
            let old_info =
                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
            if data_a.principal_def_id() == data_b.principal_def_id() {
                // A NOP cast that doesn't actually change anything; this should be
                // allowed even with invalid vtables.
                return old_info;
            }

            // trait upcasting coercion
            let vptr_entry_idx =
                cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));

            if let Some(entry_idx) = vptr_entry_idx {
                let ptr_ty = cx.type_i8p();
                let ptr_align = cx.tcx().data_layout.pointer_align.abi;
                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                let gep = bx.inbounds_gep(
                    ptr_ty,
                    llvtable,
                    &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                );
                let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                bx.nonnull_metadata(new_vptr);
                // VTable loads are invariant.
                bx.set_invariant_load(new_vptr);
                new_vptr
            } else {
                old_info
            }
        }
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
                cx.layout_of(cx.tcx().mk_mut_ptr(target)),
                1,
                true,
            );
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}

/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {
                return (src, old_info.unwrap());
            }
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                if src_f.is_zst() {
                    continue;
                }

                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
            }
            let (lldata, llextra) = result.unwrap();
            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
        }
        _ => bug!("unsize_ptr: called on bad types"),
    }
}

/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
                OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx,
                        dst_f.llval,
                        dst_f.align,
                        src_f.llval,
                        src_f.align,
                        src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}
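
/// For shift operators, adjusts `rhs` to the width of `lhs`, since LLVM
/// requires both operands of a shift to have the same type, while Rust
/// allows any integer type on the right-hand side.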
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}

fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have any size int on the rhs.
    if op.is_shift() {
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        }
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        }
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
        if lhs_sz < rhs_sz {
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
            bx.zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.is_like_msvc
}
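
/// Copies a value with the given layout from `src` to `dst`, doing nothing
/// for zero-sized types.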
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    dst: Bx::Value,
    dst_align: Align,
    src: Bx::Value,
    src_align: Align,
    layout: TyAndLayout<'tcx>,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}

pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // This is an `info!` to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // compiling too many functions.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}

/// Creates the `main` wrapper function which will initialize the rust runtime
/// and call the user's `main` function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
    let main_is_local = main_def_id.is_local();
    let instance = Instance::mono(cx.tcx(), main_def_id);

    if main_is_local {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
            return None;
        }
    } else if !cx.codegen_unit().is_primary() {
        // We want to create the wrapper only when the codegen unit is the primary one.
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, entry_type);
    return Some(entry_fn);

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        entry_type: EntryFnType,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments,
        // then its return type cannot have
        // late-bound regions, since late-bound
        // regions must appear in the argument
        // listing.
        let main_ret_ty = cx.tcx().normalize_erasing_regions(
            ty::ParamEnv::reveal_all(),
            main_ret_ty.no_bound_vars().unwrap(),
        );

        let Some(llfn) = cx.declare_c_main(llfty) else {
            // FIXME: We should be smart and show a better diagnostic here.
            let span = cx.tcx().def_span(rust_main_def_id);
            cx.sess()
                .struct_span_err(span, "entry symbol `main` declared multiple times")
                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        };

        // `main` should respect the same config for frame pointer elimination as the rest of the code.
        cx.set_frame_pointer_type(llfn);
        cx.apply_target_cpu_attr(llfn);

        let llbb = Bx::append_block(&cx, llfn, "top");
        let mut bx = Bx::build(&cx, llbb);

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let isize_ty = cx.type_isize();
        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );

            let i8_ty = cx.type_i8();
            let arg_sigpipe = bx.const_u8(sigpipe);

            let start_ty =
                cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
            (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
        } else {
            debug!("using user-defined start fn");
            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
            (rust_main, start_ty, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_ty, start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}

/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.main_needs_argc_argv {
        // Params from native `main()` are used as args for the rust start function.
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}

/// Returns all of the debugger visualizers specified for the current crate,
/// as well as those of all upstream crates transitively, that match the given
/// `visualizer_type`.
pub fn collect_debugger_visualizers_transitive(
    tcx: TyCtxt<'_>,
    visualizer_type: DebuggerVisualizerType,
) -> BTreeSet<DebuggerVisualizerFile> {
    tcx.debugger_visualizers(LOCAL_CRATE)
        .iter()
        .chain(
            tcx.crates(())
                .iter()
                .filter(|&cnum| {
                    let used_crate_source = tcx.used_crate_source(*cnum);
                    used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
                })
                .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
        )
        .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
        .cloned()
        .collect::<BTreeSet<_>>()
}
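
/// Drives codegen for the entire crate: emits the metadata module and
/// allocator shim if needed, then feeds every codegen unit to the backend,
/// returning a handle used to join the ongoing background codegen.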
pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);

        ongoing_codegen.codegen_finished(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(()).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let metadata_module = if need_metadata_module {
        // Emit compressed metadata object.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        tcx.sess.time("write_compressed_metadata", || {
            let file_name =
                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
            let data = create_compressed_metadata_file(
                tcx.sess,
                &metadata,
                &exported_symbols::metadata_symbol_name(tcx),
            );
            if let Err(err) = std::fs::write(&file_name, data) {
                tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
            }
            Some(CompiledModule {
                name: metadata_cgu_name,
                kind: ModuleKind::Metadata,
                object: Some(file_name),
                dwarf_object: None,
                bytecode: None,
            })
        })
    } else {
        None
    };

    let ongoing_codegen = start_async_codegen(
        backend.clone(),
        tcx,
        target_cpu,
        metadata,
        metadata_module,
        codegen_units.len(),
    );

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind(()) {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let module_llvm = tcx.sess.time("write_allocator_module", || {
            backend.codegen_allocator(tcx, &llmod_id, kind, tcx.lang_items().oom().is_some())
        });

        Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

    // For better throughput during parallel processing by LLVM, we used to sort
    // CGUs largest to smallest. This would lead to better thread utilization
    // by, for example, preventing a large CGU from being processed last and
    // having only one LLVM thread working while the rest remained idle.
    //
    // However, this strategy would lead to high memory usage, as it meant the
    // LLVM-IR for all of the largest CGUs would be resident in memory at once.
    //
    // Instead, we can compromise by ordering CGUs such that the largest and
    // smallest are first, second largest and smallest are next, etc. If there
    // are large size variations, this can reduce memory usage significantly.
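    //
    // For example, CGUs with size estimates [1, 4, 5, 9] are emitted in the
    // order [9, 1, 5, 4]: the halves [1, 4] and [5, 9] are interleaved with
    // the larger half reversed.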
    let codegen_units: Vec<_> = {
        let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
        sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());

        let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
        second_half.iter().rev().interleave(first_half).copied().collect()
    };

    // Calculate the CGU reuse.
    let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
        codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
    });

    let mut total_codegen_time = Duration::new(0, 0);
    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen thread to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OnGoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
        tcx.sess.time("compile_first_CGU_batch", || {
            // Try to find one CGU to compile per thread.
            let cgus: Vec<_> = cgu_reuse
                .iter()
                .enumerate()
                .filter(|&(_, reuse)| reuse == &CguReuse::No)
                .take(tcx.sess.threads())
                .collect();

            // Compile the found CGUs in parallel.
            let start_time = Instant::now();

            let pre_compiled_cgus = par_iter(cgus)
                .map(|(i, _)| {
                    let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                    (i, module)
                })
                .collect();

            total_codegen_time += start_time.elapsed();

            pre_compiled_cgus
        })
    } else {
        FxHashMap::default()
    };

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
                    cgu
                } else {
                    let start_time = Instant::now();
                    let module = backend.compile_codegen_unit(tcx, cgu.name());
                    total_codegen_time += start_time.elapsed();
                    module
                };
                // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                // compilation hang on post-monomorphization errors.
                tcx.sess.abort_if_errors();

                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator.sender,
                    module,
                    cost,
                );
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
        }
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of `-Ztime-passes` output manually.
    if tcx.sess.time_passes() {
        let end_rss = get_resident_set_size();

        print_time_passes_entry(
            "codegen_to_LLVM_IR",
            total_codegen_time,
            start_rss.unwrap(),
            end_rss,
        );
    }

    ongoing_codegen.check_for_errors(tcx.sess);
    ongoing_codegen
}

impl CrateInfo {
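    /// Collects the crate-wide information (exported and linked symbols,
    /// native libraries, lang items, debugger visualizers, ...) that the
    /// linking step needs later, while the `TyCtxt` is still available.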
    pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
        let exported_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
            .collect();
        let linked_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
            .collect();
        let local_crate_name = tcx.crate_name(LOCAL_CRATE);
        let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
        let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
        let windows_subsystem = subsystem.map(|subsystem| {
            if subsystem != sym::windows && subsystem != sym::console {
                tcx.sess.fatal(&format!(
                    "invalid windows subsystem `{}`, only \
                     `windows` and `console` are allowed",
                    subsystem
                ));
            }
            subsystem.to_string()
        });

        // This list is used when generating the command line to pass through to
        // the system linker. The linker expects undefined symbols on the left of
        // the command line to be defined in libraries on the right, not the other
        // way around. For more info, see some comments in the add_used_library
        // function below.
        //
        // In order to get this left-to-right dependency ordering, we use the reverse
        // postorder of all crates, putting the leaves at the right-most positions.
        let used_crates = tcx
            .postorder_cnums(())
            .iter()
            .rev()
            .copied()
            .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
            .collect();

        let mut info = CrateInfo {
            target_cpu,
            exported_symbols,
            linked_symbols,
            local_crate_name,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
            crate_name: Default::default(),
            used_crates,
            used_crate_source: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
            dependency_formats: tcx.dependency_formats(()).clone(),
            windows_subsystem,
            natvis_debugger_visualizers: Default::default(),
        };
        let lang_items = tcx.lang_items();

        let crates = tcx.crates(());

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries
                .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
            info.crate_name.insert(cnum, tcx.crate_name(cnum));

            let used_crate_source = tcx.used_crate_source(cnum);
            info.used_crate_source.insert(cnum, used_crate_source.clone());
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that don't actually need to exist.
            let missing =
                missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
            CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
                // These are crate types for which we invoke the linker and can embed
                // NatVis visualizers.
                true
            }
            CrateType::ProcMacro => {
                // We could embed NatVis for proc macro crates too (to improve the debugging
                // experience for them) but it does not seem like a good default, since
                // this is a rare use case and we don't want to slow down the common case.
                false
            }
            CrateType::Staticlib | CrateType::Rlib => {
                // We don't invoke the linker for these, so we don't need to collect the
                // NatVis for them.
                false
            }
        });

        if tcx.sess.target.is_like_msvc && embed_visualizers {
            info.natvis_debugger_visualizers =
                collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
        }

        info
    }
}
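
/// Provides the `backend_optimization_level` query, which determines the
/// optimization level the backend actually applies, taking `#[optimize]`
/// attributes into account.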
pub fn provide(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead for any
            // item carrying `#[optimize(speed)]`.
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        tcx.sess.opts.optimize
    };
}
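
/// Decides whether the object code for a CGU can be reused from the previous
/// incremental compilation session, and if so whether the pre- or post-LTO
/// artifact is the one to reuse.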
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.try_mark_green(&dep_node) {
        // We can re-use either the pre- or the post-thinlto state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts.
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}