//! Codegen the completed AST to the LLVM IR.
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//! Hopefully useful general knowledge about codegen:
//! * There's no way to find out the `Ty` type of a Value. Doing so
//!   would be "trying to get the eggs out of an omelette" (credit:
//!   pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
//!   but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
//!   int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
use crate::{ModuleCodegen, ModuleKind, CachedModuleCodegen};
use rustc::dep_graph::cgu_reuse_tracker::CguReuse;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Stats, CodegenUnitNameBuilder};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
use rustc::util::profiling::ProfileCategory;
use rustc::session::config::{self, EntryFnType, Lto};
use rustc::session::Session;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use rustc::util::time_graph;
use rustc_mir::monomorphize::Instance;
use rustc_mir::monomorphize::partitioning::{CodegenUnit, CodegenUnitExt};
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::sync::Lrc;
use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use crate::mir::place::PlaceRef;
use crate::back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm,
                         submit_post_lto_module_to_llvm};
use crate::{MemFlags, CrateInfo};
use crate::common::{RealPredicate, TypeKind, IntPredicate};
use crate::mono_item::MonoItem;
use std::ops::{Deref, DerefMut};
use std::time::{Instant, Duration};
use crate::mir::operand::OperandValue;
use std::marker::PhantomData;
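/// Helper that, when `-Z codegen-stats` is enabled, records how many LLVM
/// instructions were emitted while codegenning a single function.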
pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> {
    _marker: PhantomData<&'tcx ()>,
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> {
    pub fn new(cx: &'a Cx, name: String) -> Self {
        let istart = cx.stats().borrow().n_llvm_insns;
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> {
        if self.cx.sess().codegen_stats() {
            let mut stats = self.cx.stats().borrow_mut();
            let iend = stats.n_llvm_insns;
            stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
            // Reset LLVM insn count to avoid compound costs.
            stats.n_llvm_insns = self.istart;
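/// Maps a HIR comparison operator to the corresponding LLVM integer predicate,
/// picking the signed or unsigned variant according to `signed`.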
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
        hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
        hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
        hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
            bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
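/// Maps a HIR comparison operator to the corresponding LLVM floating-point predicate.
/// Note that `Ne` uses the *unordered* `RealUNE` predicate, so `x != y` holds when either
/// operand is NaN, while the remaining operators use ordered predicates.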
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
            bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
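/// Emits an element-wise SIMD comparison and sign-extends the resulting `i1` mask
/// vector to the requested return type `ret_ty`.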
pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    let signed = match t.sty {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        ty::Uint(_) => false,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
/// Retrieves the information we are losing (making dynamic) in an unsizing adjustment.
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
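///
/// Concretely: for `[T; N] -> [T]` the info is the array length, for a sized `T` being
/// coerced to `dyn Trait` it is the vtable, and for trait-object-to-trait-object upcasts
/// the existing vtable (`old_info`) is reused.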
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
    old_info: Option<Cx::Value>,
    let (source, target) = cx.tcx().struct_lockstep_tails(source, target);
    match (&source.sty, &target.sty) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.unwrap_usize(cx.tcx()))
        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never require an actual change to
            // the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
                .field(cx, FAT_PTR_EXTRA);
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()),
                             cx.backend_type(vtable_ptr))
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.sty, &dst_ty.sty) {
         &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert!(bx.cx().type_is_sized(a));
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
            let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
            assert!(bx.cx().type_is_sized(a));
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);
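            // The two ADTs can differ only in their unsized tail field; walk the fields to
            // locate the single field whose type changed and unsize the pointer through it.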
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                assert_eq!(src_layout.size, src_f.size);
                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
            let (lldata, llextra) = result.unwrap();
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
             bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
        _ => bug!("unsize_thin_ptr: called on bad types"),
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    let mut coerce_ptr = || {
        let (base, info) = match bx.load_operand(src).val {
            OperandValue::Pair(base, info) => {
                // fat-ptr to fat-ptr unsize preserves the vtable
                // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
                // So we need to pointercast the base to ensure
                // the types match up.
                let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
                (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
            OperandValue::Immediate(base) => {
                unsize_thin_ptr(bx, base, src_ty, dst_ty)
            OperandValue::Ref(..) => bug!()
        OperandValue::Pair(base, info).store(bx, dst);
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::Ref(..), &ty::Ref(..)) |
        (&ty::Ref(..), &ty::RawPtr(..)) |
        (&ty::RawPtr(..), &ty::RawPtr(..)) => {
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);
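            // Coerce the struct field by field: fields whose type is unchanged are copied
            // verbatim, while the field driving the unsizing is coerced recursively.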
            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);
                if dst_f.layout.is_zst() {
                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align,
                              src_f.layout, MemFlags::empty());
                    coerce_unsized_into(bx, src_f, dst_f);
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cast_shift_rhs(bx, op, lhs, rhs)
fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    // Shifts may have any size int on the rhs
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
            bx.zext(rhs, lhs_llty)
/// Returns `true` if this session's target will use SEH-based unwinding.
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.target.options.is_like_msvc
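/// Widens an immediate `i1` boolean to the `i8` used for its in-memory representation;
/// all other values are passed through unchanged.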
pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    if bx.cx().val_ty(val) == bx.cx().type_i1() {
        bx.zext(val, bx.cx().type_i8())
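/// Converts a loaded scalar back into its immediate form; for `bool` this narrows the
/// in-memory `i8` representation back to `i1` (see `to_immediate_scalar`).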
pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    layout: layout::TyLayout,
    if let layout::Abi::Scalar(ref scalar) = layout.abi {
        return to_immediate_scalar(bx, val, scalar);
pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    scalar: &layout::Scalar,
    if scalar.is_bool() {
        return bx.trunc(val, bx.cx().type_i1());
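/// Copies a value of the given `layout` from `src` to `dst`, respecting the provided
/// alignments and `MemFlags`.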
pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    layout: TyLayout<'tcx>,
    let size = layout.size.bytes();
    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
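/// Codegens the MIR body of a single monomorphized `Instance` into the LLVM function
/// that has already been declared for it.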
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
    let _s = if cx.sess().codegen_stats() {
        let mut instance_name = String::new();
        DefPathBasedNames::new(cx.tcx(), true, true)
            .push_def_path(instance.def_id(), &mut instance_name);
        Some(StatRecorder::new(cx, instance_name))
    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    info!("codegen_instance({})", instance);
    let sig = instance.fn_sig(cx.tcx());
    let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
    let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(||
        bug!("Instance `{:?}` not already declared", instance));
    cx.stats().borrow_mut().n_closures += 1;
    let mir = cx.tcx().instance_mir(instance.def);
    mir::codegen_mir::<Bx>(cx, lldecl, &mir, instance, sig);
/// Creates the `main` function which will initialize the Rust runtime and call
/// the user's main function.
pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx
    let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
        Some((def_id, _)) => { (def_id, cx.tcx().def_span(def_id)) },
    let instance = Instance::mono(cx.tcx(), main_def_id);
    if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
    let main_llfn = cx.get_fn(instance);
    let et = cx.tcx().entry_fn(LOCAL_CRATE).map(|e| e.1);
        Some(EntryFnType::Main) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, true),
        Some(EntryFnType::Start) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, false),
        None => {} // Do nothing.
    fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        use_start_lang_item: bool,
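        // The wrapper is given the C signature `int main(int argc, char **argv)`.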
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx().erase_regions(
            &main_ret_ty.no_bound_vars().unwrap(),
        if cx.get_defined_value("main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
                .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
            cx.sess().abort_if_errors();
        let llfn = cx.declare_cfn("main", llfty);
        // `main` should respect the same config for frame pointer elimination as the rest of the code.
        cx.set_frame_pointer_elimination(llfn);
        cx.apply_target_cpu_attr(llfn);
        let mut bx = Bx::new_block(&cx, llfn, "top");
        bx.insert_reference_to_gdb_debug_scripts_section_global();
        // Params from the native `main()` are used as args for the Rust start function.
        let param_argc = cx.get_param(llfn, 0);
        let param_argv = cx.get_param(llfn, 1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
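        // Either call the `lang_start` item (passing the user `main` along with argc/argv)
        // or, for `#[start]`, call the user-provided start function directly.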
        let (start_fn, args) = if use_start_lang_item {
            let start_def_id = cx.tcx().require_lang_item(StartFnLangItem);
            let start_fn = callee::resolve_and_get_fn(
                cx.tcx().intern_substs(&[main_ret_ty.into()]),
            (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
            debug!("using user-defined start fn");
            (rust_main, vec![arg_argc, arg_argv])
        let result = bx.call(start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId =
    time_graph::TimelineId(CODEGEN_WORKER_ID);
pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
    time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]);
pub fn codegen_crate<B: ExtraBackendMethods>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> OngoingCodegen<B> {
    check_for_rustc_errors_attr(tcx);
    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
    // Codegen the metadata.
    tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen));
    let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
                                                            Some("metadata")).as_str()
    let metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
    let metadata = time(tcx.sess, "write metadata", || {
        backend.write_metadata(tcx, &metadata_llvm_module)
    tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen));
    let metadata_module = ModuleCodegen {
        name: metadata_cgu_name,
        module_llvm: metadata_llvm_module,
        kind: ModuleKind::Metadata,
    let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
        Some(time_graph::TimeGraph::new())
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen ||
       !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
        ongoing_codegen.codegen_finished(tcx);
        assert_and_save_dep_graph(tcx);
        ongoing_codegen.check_for_errors(tcx.sess);
        return ongoing_codegen;
    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
    let codegen_units = (*codegen_units).clone();
    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in &codegen_units {
            tcx.codegen_unit(cgu.name().clone());
    let ongoing_codegen = start_async_codegen(
        codegen_units.len());
    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
    // Codegen an allocator shim, if necessary.
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the allocator.
    let any_dynamic_crate = tcx.sess.dependency_formats.borrow()
            use rustc::middle::dependency_format::Linkage;
            list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    let allocator_module = if any_dynamic_crate {
    } else if let Some(kind) = *tcx.sess.allocator_kind.get() {
        let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
                                                       Some("allocator")).as_str()
        let modules = backend.new_metadata(tcx, &llmod_id);
        time(tcx.sess, "write allocator module", || {
            backend.codegen_allocator(tcx, &modules, kind)
            module_llvm: modules,
            kind: ModuleKind::Allocator,
    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
    // We sort the codegen units by size. This way we can schedule work for LLVM
    // a bit more efficiently.
    let codegen_units = {
        let mut codegen_units = codegen_units;
        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
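        // `Reverse` puts the largest CGUs first, so the longest-running LLVM jobs are
        // started as early as possible.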
    let mut total_codegen_time = Duration::new(0, 0);
    let mut all_stats = Stats::default();
    for cgu in codegen_units.into_iter() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);
        let cgu_reuse = determine_cgu_reuse(tcx, &cgu);
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
                let _timing_guard = time_graph.as_ref().map(|time_graph| {
                    time_graph.start(CODEGEN_WORKER_TIMELINE,
                                     CODEGEN_WORK_PACKAGE_KIND,
                                     &format!("codegen {}", cgu.name()))
                let start_time = Instant::now();
                let stats = backend.compile_codegen_unit(tcx, *cgu.name());
                all_stats.extend(stats);
                total_codegen_time += start_time.elapsed();
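            // For a CGU that can be re-used, skip codegen entirely and hand the cached
            // module from the previous session to the back-end (before or after the LTO
            // stage, respectively).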
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen {
                    name: cgu.name().to_string(),
                    source: cgu.work_product(tcx),
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen {
                    name: cgu.name().to_string(),
                    source: cgu.work_product(tcx),
    ongoing_codegen.codegen_finished(tcx);
    // Since the main thread is sometimes blocked during codegen, we keep track
    // of the -Ztime-passes output manually.
    print_time_passes_entry(tcx.sess.time_passes(),
                            "codegen to LLVM IR",
    ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
    symbol_names_test::report_symbol_names(tcx);
    if tcx.sess.codegen_stats() {
        println!("--- codegen stats ---");
        println!("n_glues_created: {}", all_stats.n_glues_created);
        println!("n_null_glues: {}", all_stats.n_null_glues);
        println!("n_real_glues: {}", all_stats.n_real_glues);
        println!("n_fns: {}", all_stats.n_fns);
        println!("n_inlines: {}", all_stats.n_inlines);
        println!("n_closures: {}", all_stats.n_closures);
        println!("fn stats:");
        all_stats.fn_stats.sort_by_key(|&(_, insns)| insns);
        for &(ref name, insns) in all_stats.fn_stats.iter() {
            println!("{} insns, {}", insns, *name);
    if tcx.sess.count_llvm_insns() {
        for (k, v) in all_stats.llvm_insns.iter() {
            println!("{:7} {}", *v, *k);
    ongoing_codegen.check_for_errors(tcx.sess);
    assert_and_save_dep_graph(tcx);
    ongoing_codegen.into_inner()
/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault.
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);
impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
    fn into_inner(mut self) -> OngoingCodegen<B> {
        self.0.take().unwrap()
impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
    type Target = OngoingCodegen<B>;
    fn deref(&self) -> &OngoingCodegen<B> {
        self.0.as_ref().unwrap()
impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
        self.0.as_mut().unwrap()
impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
        if let Some(codegen) = self.0.take() {
            codegen.codegen_aborted();
fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) {
         || ::rustc_incremental::assert_dep_graph(tcx));
         "serialize dep graph",
         || ::rustc_incremental::save_dep_graph(tcx));
    pub fn new(tcx: TyCtxt) -> CrateInfo {
        let mut info = CrateInfo {
            compiler_builtins: None,
            profiler_runtime: None,
            sanitizer_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE),
            link_args: tcx.link_args(LOCAL_CRATE),
            crate_name: Default::default(),
            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
            used_crate_source: Default::default(),
            wasm_imports: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
        let lang_items = tcx.lang_items();
        let load_wasm_items = tcx.sess.crate_types.borrow()
            .any(|c| *c != config::CrateType::Rlib) &&
            tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown";
            info.load_wasm_imports(tcx, LOCAL_CRATE);
        let crates = tcx.crates();
        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);
        for &cnum in crates.iter() {
            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
            if tcx.is_panic_runtime(cnum) {
                info.panic_runtime = Some(cnum);
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            if tcx.is_sanitizer_runtime(cnum) {
                info.sanitizer_runtime = Some(cnum);
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
                info.load_wasm_imports(tcx, cnum);
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
            // No need to look for lang items that are whitelisted and don't
            // actually need to exist.
            let missing = missing.iter()
                .filter(|&l| !weak_lang_items::whitelisted(tcx, l))
            info.missing_lang_items.insert(cnum, missing);
    fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) {
        self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| {
            let instance = Instance::mono(tcx, id);
            let import_name = tcx.symbol_name(instance);
            (import_name.to_string(), module.clone())
fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool {
    let (all_mono_items, _) =
        tcx.collect_and_partition_mono_items(LOCAL_CRATE);
    all_mono_items.contains(&id)
pub fn provide_both(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as the inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus unavoidably pulling these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead (if optimize(size)
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
            let hir::CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
        return tcx.sess.opts.optimize;
    providers.dllimport_foreign_items = |tcx, krate| {
        let module_map = tcx.foreign_modules(krate);
        let module_map = module_map.iter()
            .map(|lib| (lib.def_id, lib))
            .collect::<FxHashMap<_, _>>();
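        // A foreign item is dllimport'ed if it belongs to a foreign module pulled in by a
        // `NativeUnknown` native library whose `#[link(cfg = ...)]` condition, if any, holds.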
        let dllimports = tcx.native_libraries(krate)
                if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
                let cfg = match lib.cfg {
                    Some(ref cfg) => cfg,
                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
            .filter_map(|lib| lib.foreign_module)
            .map(|id| &module_map[&id])
            .flat_map(|module| module.foreign_items.iter().cloned())
    providers.is_dllimport_foreign_item = |tcx, def_id| {
        tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                 cgu: &CodegenUnit<'tcx>)
    if !tcx.dep_graph.is_fully_enabled() {
    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(!tcx.dep_graph.dep_node_exists(&dep_node),
            "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
        // We can re-use either the pre- or the post-thinlto state
        if tcx.sess.lto() != Lto::No {