use crate::back::link::are_upstream_rust_objects_already_included;
use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::errors;
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::par_iter;
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::Symbol;
use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
use rustc_target::abi::{Align, Size, VariantIdx};

use std::collections::BTreeSet;
use std::time::{Duration, Instant};

use itertools::Itertools;
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
        hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
        hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
        hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
        op => bug!(
            "comparison_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
        op => bug!(
            "comparison_op_to_fcmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}
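
// Note on the predicate choice above: every comparison except `Ne` maps to an
// *ordered* predicate (`RealO*`), which is false if either operand is NaN,
// while `Ne` maps to the *unordered* `RealUNE`, which is true if either
// operand is NaN. This matches Rust's surface semantics, e.g.:
//
//     let nan = f32::NAN;
//     assert!(!(nan == nan)); // RealOEQ: ordered, false on NaN
//     assert!(nan != nan);    // RealUNE: unordered, true on NaN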
pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind() {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}
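
// Illustrative sketch of the mask convention above (not part of this module):
// comparing two `<4 x i32>` vectors yields `<4 x i1>` in LLVM, and the `sext`
// widens each lane to all-ones (-1) or all-zeros, which is the mask layout
// SIMD consumers expect:
//
//     %cmp  = icmp slt <4 x i32> %lhs, %rhs   ; <4 x i1>
//     %mask = sext <4 x i1> %cmp to <4 x i32> ; lanes become 0 or -1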
/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> Bx::Value {
    let cx = bx.cx();
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
    match (source.kind(), target.kind()) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (
            &ty::Dynamic(ref data_a, _, src_dyn_kind),
            &ty::Dynamic(ref data_b, _, target_dyn_kind),
        ) if src_dyn_kind == target_dyn_kind => {
            let old_info =
                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
            if data_a.principal_def_id() == data_b.principal_def_id() {
                // A NOP cast that doesn't actually change anything; this should be
                // allowed even with invalid vtables.
                return old_info;
            }

            // trait upcasting coercion
            let vptr_entry_idx =
                cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));

            if let Some(entry_idx) = vptr_entry_idx {
                let ptr_ty = cx.type_i8p();
                let ptr_align = cx.tcx().data_layout.pointer_align.abi;
                let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                let gep = bx.inbounds_gep(
                    ptr_ty,
                    llvtable,
                    &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                );
                let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                bx.nonnull_metadata(new_vptr);
                // VTable loads are invariant.
                bx.set_invariant_load(new_vptr);
                bx.pointercast(new_vptr, vtable_ptr_ty)
            } else {
                old_info
            }
        }
        (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
            let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}
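
// Illustrative sketch of the three cases above (not part of this module):
//
//   [u8; 4]  -> [u8]        => the info is the constant length, `4usize`
//   &T       -> &dyn Trait  => the info is a pointer to `T`'s vtable
//   &dyn Sub -> &dyn Super  => the info is derived from the old vtable, either
//                              reused as-is (same principal trait) or loaded
//                              from an upcasting slot within it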
// Returns the vtable pointer type of a `dyn` or `dyn*` type
fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
    cx: &Cx,
    target: Ty<'tcx>,
    kind: ty::DynKind,
) -> <Cx as BackendTypes>::Type {
    cx.scalar_pair_element_backend_type(
        cx.layout_of(match kind {
            // vtable is the second field of `*mut dyn Trait`
            ty::Dyn => cx.tcx().mk_mut_ptr(target),
            // vtable is the second field of `dyn* Trait`
            ty::DynStar => target,
        }),
        1,
        true,
    )
}
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b); // implies same number of fields
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {
                return (src, old_info.unwrap());
            }
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                if src_f.is_zst() {
                    continue;
                }

                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
            }
            let (lldata, llextra) = result.unwrap();
            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
        }
        _ => bug!("unsize_ptr: called on bad types"),
    }
}
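
// Illustrative sketch (not part of this module): the returned pair is the two
// halves of a fat pointer. For example, unsizing `&[u8; 4]` to `&[u8]` yields
// `(data_ptr, const 4usize)`, and unsizing `&T` to `&dyn Trait` yields
// `(data_ptr, vtable_ptr)`. The `Adt` arm handles smart pointers such as
// `Box<[u8; 4]> -> Box<[u8]>` by recursing on their sole non-ZST field.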
/// Coerces `src` to `dst_ty`, which is guaranteed to be a `dyn*` type.
pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty_and_layout: TyAndLayout<'tcx>,
    dst_ty: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("cast_to_dyn_star: {:?} => {:?}", src_ty_and_layout.ty, dst_ty);
    assert!(
        matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
        "destination type must be a dyn*"
    );
    // FIXME(dyn-star): this is probably not the best way to check if this is
    // a pointer, and really we should ensure that the value is a suitable
    // pointer earlier in the compilation process.
    let src = match src_ty_and_layout.pointee_info_at(bx.cx(), Size::ZERO) {
        Some(_) => bx.ptrtoint(src, bx.cx().type_isize()),
        None => bx.bitcast(src, bx.type_isize()),
    };
    (src, unsized_info(bx, src_ty_and_layout.ty, dst_ty, old_info))
}
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
                OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b); // implies same number of fields

            for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align, src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}
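
// Illustrative sketch (not part of this module): for a coercion like
// `let b: Box<[i32]> = Box::new([1, 2, 3]);` this walks the single non-ZST
// field of `Box`, turns the thin pointer into a `(data, len)` pair via
// `unsize_ptr`, and stores that pair into the destination place.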
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have any size int on the rhs
    let mut rhs_llty = bx.cx().val_ty(rhs);
    let mut lhs_llty = bx.cx().val_ty(lhs);
    if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
        rhs_llty = bx.cx().element_type(rhs_llty)
    }
    if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
        lhs_llty = bx.cx().element_type(lhs_llty)
    }
    let rhs_sz = bx.cx().int_width(rhs_llty);
    let lhs_sz = bx.cx().int_width(lhs_llty);
    if lhs_sz < rhs_sz {
        bx.trunc(rhs, lhs_llty)
    } else if lhs_sz > rhs_sz {
        // FIXME (#1877): If in the future shifting by negative
        // values is no longer undefined then this is wrong.
        bx.zext(rhs, lhs_llty)
    } else {
        rhs
    }
}
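
// Illustrative sketch (not part of this module): Rust allows shifts with a
// differently sized right-hand side, e.g. `1u64 << 2u8`, but LLVM's `shl`
// requires both operands to have the same width, so the `u8` shift amount is
// zero-extended to 64 bits first:
//
//     %rhs64 = zext i8 %rhs to i64
//     %res   = shl i64 %lhs, %rhs64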
/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.is_like_msvc
}
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    dst: Bx::Value,
    dst_align: Align,
    src: Bx::Value,
    src_align: Align,
    layout: TyAndLayout<'tcx>,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }
    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // compile-time errors.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}
/// Creates the `main` function which will initialize the Rust runtime and call
/// the user's `main` function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
    let main_is_local = main_def_id.is_local();
    let instance = Instance::mono(cx.tcx(), main_def_id);

    if main_is_local {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
            return None;
        }
    } else if !cx.codegen_unit().is_primary() {
        // We want to create the wrapper only when the codegen unit is the primary one
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, entry_type);
    return Some(entry_fn);

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        entry_type: EntryFnType,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx().normalize_erasing_regions(
            ty::ParamEnv::reveal_all(),
            main_ret_ty.no_bound_vars().unwrap(),
        );

        let Some(llfn) = cx.declare_c_main(llfty) else {
            // FIXME: We should be smart and show a better diagnostic here.
            let span = cx.tcx().def_span(rust_main_def_id);
            cx.sess().emit_err(errors::MultipleMainFunctions { span });
            cx.sess().abort_if_errors();
            bug!();
        };

        // `main` should respect the same config for frame pointer elimination as the rest of the code
        cx.set_frame_pointer_type(llfn);
        cx.apply_target_cpu_attr(llfn);

        let llbb = Bx::append_block(&cx, llfn, "top");
        let mut bx = Bx::build(&cx, llbb);

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let isize_ty = cx.type_isize();
        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );

            let i8_ty = cx.type_i8();
            let arg_sigpipe = bx.const_u8(sigpipe);

            let start_ty =
                cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
            (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
        } else {
            debug!("using user-defined start fn");
            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
            (rust_main, start_ty, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_ty, None, start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}

/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.main_needs_argc_argv {
        // Params from native `main()` used as args for rust start function
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}
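
// Illustrative sketch (not part of this module): for a typical `fn main()` on
// a target where `main_needs_argc_argv` is true, the wrapper built above
// corresponds roughly to this C shim calling the `lang_start` item:
//
//     int main(int argc, char **argv) {
//         return (int)std_rt_lang_start(user_main, (isize)argc, argv, sigpipe);
//     }
//
// where `std_rt_lang_start` is a stand-in name for the `LangItem::Start`
// function and `sigpipe` is the constant from `EntryFnType::Main { sigpipe }`.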
/// This function returns all of the debugger visualizers specified for the
/// current crate as well as all upstream crates transitively that match the
/// `visualizer_type` specified.
pub fn collect_debugger_visualizers_transitive(
    tcx: TyCtxt<'_>,
    visualizer_type: DebuggerVisualizerType,
) -> BTreeSet<DebuggerVisualizerFile> {
    tcx.debugger_visualizers(LOCAL_CRATE)
        .iter()
        .chain(
            tcx.crates(())
                .iter()
                .filter(|&cnum| {
                    let used_crate_source = tcx.used_crate_source(*cnum);
                    used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
                })
                .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
        )
        .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
        .cloned()
        .collect::<BTreeSet<_>>()
}
pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);

        ongoing_codegen.codegen_finished(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(()).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let metadata_module = if need_metadata_module {
        // Emit compressed metadata object.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        tcx.sess.time("write_compressed_metadata", || {
            let file_name =
                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
            let data = create_compressed_metadata_file(
                tcx.sess,
                &metadata,
                &exported_symbols::metadata_symbol_name(tcx),
            );
            if let Err(error) = std::fs::write(&file_name, data) {
                tcx.sess.emit_fatal(errors::MetadataObjectFileWrite { error });
            }
            Some(CompiledModule {
                name: metadata_cgu_name,
                kind: ModuleKind::Metadata,
                object: Some(file_name),
                dwarf_object: None,
                bytecode: None,
            })
        })
    } else {
        None
    };
    let ongoing_codegen = start_async_codegen(
        backend.clone(), tcx, target_cpu, metadata, metadata_module, codegen_units.len(),
    );

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind(()) {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let module_llvm = tcx.sess.time("write_allocator_module", || {
            backend.codegen_allocator(
                tcx,
                &llmod_id,
                kind,
                // If allocator_kind is Some then alloc_error_handler_kind must
                // also be Some.
                tcx.alloc_error_handler_kind(()).unwrap(),
            )
        });

        Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }
    // For better throughput during parallel processing by LLVM, we used to sort
    // CGUs largest to smallest. This would lead to better thread utilization
    // by, for example, preventing a large CGU from being processed last and
    // having only one LLVM thread working while the rest remained idle.
    //
    // However, this strategy would lead to high memory usage, as it meant the
    // LLVM-IR for all of the largest CGUs would be resident in memory at once.
    //
    // Instead, we can compromise by ordering CGUs such that the largest and
    // smallest are first, second largest and smallest are next, etc. If there
    // are large size variations, this can reduce memory usage significantly.
    let codegen_units: Vec<_> = {
        let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
        sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());

        let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
        second_half.iter().rev().interleave(first_half).copied().collect()
    };
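
    // Illustrative sketch (not part of the original source): with size
    // estimates [1, 2, 3, 4, 5, 6], `first_half` is [1, 2, 3] and
    // `second_half` is [4, 5, 6]; reversing the second half and interleaving
    // yields [6, 1, 5, 2, 4, 3], pairing the largest CGUs with the smallest.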
    // Calculate the CGU reuse
    let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
        codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
    });

    let mut total_codegen_time = Duration::new(0, 0);
    let start_rss = tcx.sess.opts.unstable_opts.time_passes.then(|| get_resident_set_size());
    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen threads to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OnGoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
        tcx.sess.time("compile_first_CGU_batch", || {
            // Try to find one CGU to compile per thread.
            let cgus: Vec<_> = cgu_reuse
                .iter()
                .enumerate()
                .filter(|&(_, reuse)| reuse == &CguReuse::No)
                .take(tcx.sess.threads())
                .collect();

            // Compile the found CGUs in parallel.
            let start_time = Instant::now();

            let pre_compiled_cgus = par_iter(cgus)
                .map(|(i, _)| {
                    let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                    (i, module)
                })
                .collect();

            total_codegen_time += start_time.elapsed();

            pre_compiled_cgus
        })
    } else {
        FxHashMap::default()
    };
    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
                    cgu
                } else {
                    let start_time = Instant::now();
                    let module = backend.compile_codegen_unit(tcx, cgu.name());
                    total_codegen_time += start_time.elapsed();
                    module
                };
                // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                // compilation hang on post-monomorphization errors.
                tcx.sess.abort_if_errors();

                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator.sender,
                    module,
                    cost,
                );
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator.sender,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.previous_work_product(tcx),
                    },
                );
            }
        }
    }
    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of the -Ztime-passes output manually.
    if tcx.sess.opts.unstable_opts.time_passes {
        let end_rss = get_resident_set_size();

        print_time_passes_entry(
            "codegen_to_LLVM_IR",
            total_codegen_time,
            start_rss.unwrap(),
            end_rss,
        );
    }

    ongoing_codegen.check_for_errors(tcx.sess);
    ongoing_codegen
}
impl CrateInfo {
    pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
        let exported_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
            .collect();
        let linked_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
            .collect();
        let local_crate_name = tcx.crate_name(LOCAL_CRATE);
        let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
        let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
        let windows_subsystem = subsystem.map(|subsystem| {
            if subsystem != sym::windows && subsystem != sym::console {
                tcx.sess.emit_fatal(errors::InvalidWindowsSubsystem { subsystem });
            }
            subsystem.to_string()
        });
        // This list is used when generating the command line to pass through to
        // the system linker. The linker expects undefined symbols on the left of the
        // command line to be defined in libraries on the right, not the other way
        // around. For more info, see some comments in the add_used_library function
        // below.
        //
        // In order to get this left-to-right dependency ordering, we use the reverse
        // postorder of all crates, putting the leaves at the right-most positions.
        let mut compiler_builtins = None;
        let mut used_crates: Vec<_> = tcx
            .postorder_cnums(())
            .iter()
            .rev()
            .copied()
            .filter(|&cnum| {
                let link = !tcx.dep_kind(cnum).macros_only();
                if link && tcx.is_compiler_builtins(cnum) {
                    compiler_builtins = Some(cnum);
                    return false;
                }
                link
            })
            .collect();
        // `compiler_builtins` are always placed last to ensure that they're linked correctly.
        used_crates.extend(compiler_builtins);
        let mut info = CrateInfo {
            target_cpu,
            exported_symbols,
            linked_symbols,
            local_crate_name,
            compiler_builtins,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
            crate_name: Default::default(),
            used_crates,
            used_crate_source: Default::default(),
            dependency_formats: tcx.dependency_formats(()).clone(),
            windows_subsystem,
            natvis_debugger_visualizers: Default::default(),
        };
        let crates = tcx.crates(());

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries
                .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
            info.crate_name.insert(cnum, tcx.crate_name(cnum));

            let used_crate_source = tcx.used_crate_source(cnum);
            info.used_crate_source.insert(cnum, used_crate_source.clone());
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
        }
        // Handle circular dependencies in the standard library.
        // See comment before `add_linked_symbol_object` function for the details.
        // If global LTO is enabled then almost everything (*) is glued into a single object file,
        // so this logic is not necessary and can cause issues on some targets (due to weak lang
        // item symbols being "privatized" to that object file), so we disable it.
        // (*) Native libs, and `#[compiler_builtins]` and `#[no_builtins]` crates are not glued,
        // and we assume that they cannot define weak lang items. This is not currently enforced
        // by the compiler, but that's ok because all this stuff is unstable anyway.
        let target = &tcx.sess.target;
        if !are_upstream_rust_objects_already_included(tcx.sess) {
            let missing_weak_lang_items: FxHashSet<Symbol> = info
                .used_crates
                .iter()
                .flat_map(|&cnum| tcx.missing_lang_items(cnum))
                .filter(|l| l.is_weak())
                .filter_map(|&l| {
                    let name = l.link_name()?;
                    lang_items::required(tcx, l).then_some(name)
                })
                .collect();
            let prefix = if target.is_like_windows && target.arch == "x86" { "_" } else { "" };
            info.linked_symbols
                .iter_mut()
                .filter(|(crate_type, _)| {
                    !matches!(crate_type, CrateType::Rlib | CrateType::Staticlib)
                })
                .for_each(|(_, linked_symbols)| {
                    linked_symbols.extend(
                        missing_weak_lang_items
                            .iter()
                            .map(|item| (format!("{prefix}{item}"), SymbolExportKind::Text)),
                    )
                });
        }
        let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
            CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
                // These are crate types for which we invoke the linker and can embed
                // NatVis visualizers.
                true
            }
            CrateType::ProcMacro => {
                // We could embed NatVis for proc macro crates too (to improve the debugging
                // experience for them) but it does not seem like a good default, since
                // this is a rare use case and we don't want to slow down the common case.
                false
            }
            CrateType::Staticlib | CrateType::Rlib => {
                // We don't invoke the linker for these, so we don't need to collect the NatVis for them.
                false
            }
        });

        if target.is_like_msvc && embed_visualizers {
            info.natvis_debugger_visualizers =
                collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
        }

        info
    }
}
pub fn provide(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead (if
            // `optimize(speed)` attributes are present).
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);

        let any_for_speed = defids.items().any(|id| {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None | attr::OptimizeAttr::Size => false,
                attr::OptimizeAttr::Speed => true,
            }
        });

        if any_for_speed {
            return for_speed;
        }

        tcx.sess.opts.optimize
    };
}
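
// Illustrative sketch of the provider above (not part of the original source):
// when a crate is built with `-C opt-level=s` but contains `#[optimize(speed)]`
// functions, the backend is asked to run at `OptLevel::Default` (-O2) so those
// functions can actually be optimized for speed; without any such attribute,
// the session's own optimization level is returned unchanged.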
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.try_mark_green(&dep_node) {
        // We can re-use either the pre- or the post-thinlto state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}
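
// Illustrative sketch (not part of the original source): on an incremental
// rebuild where a CGU's dep-node is marked green, a non-LTO session can reuse
// the fully optimized object (`CguReuse::PostLto`), while a (Thin)LTO session
// can only reuse the pre-LTO bitcode (`CguReuse::PreLto`), since the LTO step
// itself may depend on other modules that did change.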