use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::{par_iter, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, EntryFnType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_target::abi::{Align, VariantIdx};

use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use std::time::{Duration, Instant};

use itertools::Itertools;

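/// Maps a HIR comparison operator to the corresponding integer comparison
/// `IntPredicate`, using the signed variants of the ordering predicates when
/// `signed` is true and the unsigned variants otherwise.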
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => {
            if signed {
                IntPredicate::IntSLT
            } else {
                IntPredicate::IntULT
            }
        }
        hir::BinOpKind::Le => {
            if signed {
                IntPredicate::IntSLE
            } else {
                IntPredicate::IntULE
            }
        }
        hir::BinOpKind::Gt => {
            if signed {
                IntPredicate::IntSGT
            } else {
                IntPredicate::IntUGT
            }
        }
        hir::BinOpKind::Ge => {
            if signed {
                IntPredicate::IntSGE
            } else {
                IntPredicate::IntUGE
            }
        }
        op => bug!(
            "bin_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}

pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
        op => {
            bug!(
                "bin_op_to_fcmp_predicate: expected comparison operator, \
                 found {:?}",
                op
            );
        }
    }
}

pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind() {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}

/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> Bx::Value {
    let cx = bx.cx();
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
    match (source.kind(), target.kind()) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
            let old_info =
                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
            if data_a.principal_def_id() == data_b.principal_def_id() {
                return old_info;
            }

            // trait upcasting coercion

            let vptr_entry_idx =
                cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));

            if let Some(entry_idx) = vptr_entry_idx {
                let ptr_ty = cx.type_i8p();
                let ptr_align = cx.tcx().data_layout.pointer_align.abi;
                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                let gep = bx.inbounds_gep(
                    ptr_ty,
                    llvtable,
                    &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                );
                let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                bx.nonnull_metadata(new_vptr);
                // Vtable loads are invariant.
                bx.set_invariant_load(new_vptr);
                new_vptr
            } else {
                old_info
            }
        }
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
                cx.layout_of(cx.tcx().mk_mut_ptr(target)),
                1,
                true,
            );
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}

/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {
                return (src, old_info.unwrap());
            }
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                if src_f.is_zst() {
                    continue;
                }
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
            }
            let (lldata, llextra) = result.unwrap();
            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
        }
        _ => bug!("unsize_ptr: called on bad types"),
    }
}

/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
                OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx,
                        dst_f.llval,
                        dst_f.align,
                        src_f.llval,
                        src_f.align,
                        src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}

pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}

fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have any size int on the rhs
    if op.is_shift() {
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        }
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        }
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
        if lhs_sz < rhs_sz {
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
            bx.zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.is_like_msvc
}

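/// Copies a value with the given layout from `src` to `dst` via `memcpy`,
/// doing nothing for zero-sized layouts.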
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    dst: Bx::Value,
    dst_align: Align,
    src: Bx::Value,
    src_align: Align,
    layout: TyAndLayout<'tcx>,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}

pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // exhausting memory.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}

/// Creates the `main` function which will initialize the rust runtime and call
/// the user's `main` function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
    let main_is_local = main_def_id.is_local();
    let instance = Instance::mono(cx.tcx(), main_def_id);

    if main_is_local {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
            return None;
        }
    } else if !cx.codegen_unit().is_primary() {
        // We want to create the wrapper only when the codegen unit is the primary one
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    let use_start_lang_item = EntryFnType::Start != entry_type;
    let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
    return Some(entry_fn);

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        use_start_lang_item: bool,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx().erase_regions(main_ret_ty.no_bound_vars().unwrap());

        let llfn = match cx.declare_c_main(llfty) {
            Some(llfn) => llfn,
            None => {
                // FIXME: We should be smart and show a better diagnostic here.
                let span = cx.tcx().def_span(rust_main_def_id);
                cx.sess()
                    .struct_span_err(span, "entry symbol `main` declared multiple times")
                    .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                    .emit();
                cx.sess().abort_if_errors();
                bug!();
            }
        };

        // `main` should respect the same config for frame pointer elimination as the rest of code
        cx.set_frame_pointer_type(llfn);
        cx.apply_target_cpu_attr(llfn);

        let llbb = Bx::append_block(&cx, llfn, "top");
        let mut bx = Bx::build(&cx, llbb);

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let isize_ty = cx.type_isize();
        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, start_ty, args) = if use_start_lang_item {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );
            let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
            (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
        } else {
            debug!("using user-defined start fn");
            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
            (rust_main, start_ty, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_ty, start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}

/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.main_needs_argc_argv {
        // Params from native `main()` used as args for rust start function
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}

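/// Drives codegen for the entire crate: kicks off the background codegen
/// coordinator, emits the allocator shim and metadata module when needed, and
/// then compiles each codegen unit (or reuses its cached artifact), submitting
/// finished modules to the backend as they become available.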
pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, 1);

        ongoing_codegen.codegen_finished(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(()).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let ongoing_codegen =
        start_async_codegen(backend.clone(), tcx, target_cpu, metadata, codegen_units.len());
    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind(()) {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let mut module_llvm = backend.new_metadata(tcx, &llmod_id);
        tcx.sess.time("write_allocator_module", || {
            backend.codegen_allocator(
                tcx,
                &mut module_llvm,
                &llmod_id,
                kind,
                tcx.lang_items().oom().is_some(),
            )
        });

        Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

    if need_metadata_module {
        // Codegen the encoded metadata.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
        tcx.sess.time("write_compressed_metadata", || {
            backend.write_compressed_metadata(
                tcx,
                &ongoing_codegen.metadata,
                &mut metadata_llvm_module,
            );
        });

        let metadata_module = ModuleCodegen {
            name: metadata_cgu_name,
            module_llvm: metadata_llvm_module,
            kind: ModuleKind::Metadata,
        };
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
    }

    // For better throughput during parallel processing by LLVM, we used to sort
    // CGUs largest to smallest. This would lead to better thread utilization
    // by, for example, preventing a large CGU from being processed last and
    // having only one LLVM thread working while the rest remained idle.
    //
    // However, this strategy would lead to high memory usage, as it meant the
    // LLVM-IR for all of the largest CGUs would be resident in memory at once.
    //
    // Instead, we can compromise by ordering CGUs such that the largest and
    // smallest are first, second largest and smallest are next, etc. If there
    // are large size variations, this can reduce memory usage significantly.
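    //
    // For example, CGUs with size estimates [4, 5, 6, 7, 8, 9] are emitted in
    // the order [9, 4, 8, 5, 7, 6]: sort ascending, split the list in half,
    // then interleave the reversed upper half with the lower half.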
    let codegen_units: Vec<_> = {
        let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
        sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());

        let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
        second_half.iter().rev().interleave(first_half).copied().collect()
    };

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen thread to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OnGoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
        if cfg!(parallel_compiler) {
            tcx.sess.time("compile_first_CGU_batch", || {
                // Try to find one CGU to compile per thread.
                let cgus: Vec<_> = cgu_reuse
                    .iter()
                    .enumerate()
                    .filter(|&(_, reuse)| reuse == &CguReuse::No)
                    .take(tcx.sess.threads())
                    .collect();

                // Compile the found CGUs in parallel.
                let start_time = Instant::now();

                let pre_compiled_cgus = par_iter(cgus)
                    .map(|(i, _)| {
                        let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                        (i, module)
                    })
                    .collect();

                (pre_compiled_cgus, start_time.elapsed())
            })
        } else {
            (FxHashMap::default(), Duration::new(0, 0))
        }
    };

    let mut cgu_reuse = Vec::new();
    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
    let mut total_codegen_time = Duration::new(0, 0);
    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        // Do some setup work in the first iteration
        if pre_compiled_cgus.is_none() {
            // Calculate the CGU reuse
            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
            });
            // Pre compile some CGUs
            let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
            pre_compiled_cgus = Some(compiled_cgus);
            total_codegen_time += codegen_time;
        }

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) =
                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
                        cgu
                    } else {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, cgu.name());
                        total_codegen_time += start_time.elapsed();
                        module
                    };
                // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                // compilation hang on post-monomorphization errors.
                tcx.sess.abort_if_errors();

                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    module,
                    cost,
                );
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
            }
        }
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of `-Ztime-passes` output manually.
    if tcx.sess.time_passes() {
        let end_rss = get_resident_set_size();

        print_time_passes_entry(
            "codegen_to_LLVM_IR",
            total_codegen_time,
            start_rss.unwrap(),
            end_rss,
        );
    }

    ongoing_codegen.check_for_errors(tcx.sess);

    ongoing_codegen.into_inner()
}

/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);

impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
    fn into_inner(mut self) -> OngoingCodegen<B> {
        self.0.take().unwrap()
    }
}

impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
    type Target = OngoingCodegen<B>;

    fn deref(&self) -> &OngoingCodegen<B> {
        self.0.as_ref().unwrap()
    }
}

impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
        self.0.as_mut().unwrap()
    }
}

impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
    fn drop(&mut self) {
        if let Some(codegen) = self.0.take() {
            codegen.codegen_aborted();
        }
    }
}

impl CrateInfo {
    pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
        let exported_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
            .collect();
        let local_crate_name = tcx.crate_name(LOCAL_CRATE);
        let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
        let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
        let windows_subsystem = subsystem.map(|subsystem| {
            if subsystem != sym::windows && subsystem != sym::console {
                tcx.sess.fatal(&format!(
                    "invalid windows subsystem `{}`, only \
                     `windows` and `console` are allowed",
                    subsystem
                ));
            }
            subsystem.to_string()
        });

        // This list is used when generating the command line to pass through to
        // system linker. The linker expects undefined symbols on the left of the
        // command line to be defined in libraries on the right, not the other way
        // around. For more info, see some comments in the add_used_library function
        // below.
        //
        // In order to get this left-to-right dependency ordering, we use the reverse
        // postorder of all crates putting the leaves at the right-most positions.
        let used_crates = tcx
            .postorder_cnums(())
            .iter()
            .rev()
            .copied()
            .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
            .collect();

        let mut info = CrateInfo {
            target_cpu,
            exported_symbols,
            local_crate_name,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
            crate_name: Default::default(),
            used_crates,
            used_crate_source: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
            dependency_formats: tcx.dependency_formats(()),
            windows_subsystem,
        };
        let lang_items = tcx.lang_items();

        let crates = tcx.crates(());

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries
                .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that don't actually need to exist.
            let missing =
                missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        info
    }
}

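/// Installs the query providers defined by this module; currently just
/// `backend_optimization_level`, which decides the effective optimization
/// level for a crate's codegen, taking `#[optimize]` attributes into account.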
pub fn provide(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead (if
            // #[optimize(speed)] is present on any function in the crate).
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        tcx.sess.opts.optimize
    };
}

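/// Determines whether a codegen unit can be reused from the incremental cache
/// and, if so, whether the pre-LTO or the post-LTO artifact is the one to use.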
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.try_mark_green(&dep_node) {
        // We can re-use either the pre- or the post-thinlto state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts.
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}