1 use super::link::{self, remove};
2 use super::linker::LinkerInfo;
3 use super::lto::{self, SerializedModule};
4 use super::symbol_export::symbol_name_for_instance_in_crate;
7 CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
11 use jobserver::{Acquired, Client};
13 use rustc_data_structures::fx::FxHashMap;
14 use rustc_data_structures::profiling::SelfProfilerRef;
15 use rustc_data_structures::profiling::TimingGuard;
16 use rustc_data_structures::profiling::VerboseTimingGuard;
17 use rustc_data_structures::svh::Svh;
18 use rustc_data_structures::sync::Lrc;
19 use rustc_errors::emitter::Emitter;
20 use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
21 use rustc_fs_util::link_or_copy;
22 use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
23 use rustc_incremental::{
24 copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
26 use rustc_middle::dep_graph::{WorkProduct, WorkProductFileKind, WorkProductId};
27 use rustc_middle::middle::cstore::EncodedMetadata;
28 use rustc_middle::middle::exported_symbols::SymbolExportLevel;
29 use rustc_middle::ty::TyCtxt;
30 use rustc_session::cgu_reuse_tracker::CguReuseTracker;
31 use rustc_session::config::{
32 self, Lto, OutputFilenames, OutputType, Passes, Sanitizer, SwitchWithOptPath,
34 use rustc_session::Session;
35 use rustc_span::hygiene::ExpnId;
36 use rustc_span::source_map::SourceMap;
37 use rustc_span::symbol::{sym, Symbol};
38 use rustc_target::spec::{MergeFunctions, PanicStrategy};
44 use std::path::{Path, PathBuf};
46 use std::sync::mpsc::{channel, Receiver, Sender};
50 const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
52 /// What kind of object file to emit.
53 #[derive(Clone, Copy, PartialEq)]
58 // Just uncompressed llvm bitcode. Provides easy compatibility with
59 // emscripten's emcc compiler, when used as the linker.
62 // Object code, possibly augmented with a bitcode section.
63 ObjectCode(BitcodeSection),
66 /// What kind of llvm bitcode section to embed in an object file.
67 #[derive(Clone, Copy, PartialEq)]
68 pub enum BitcodeSection {
69 // No bitcode section.
72 // An empty bitcode section (to placate tools such as the iOS linker that
73 // require this section even if they don't use it).
76 // A full, uncompressed bitcode section.
80 /// Module-specific configuration for `optimize_and_codegen`.
81 pub struct ModuleConfig {
82 /// Names of additional optimization passes to run.
83 pub passes: Vec<String>,
84 /// Some(level) to optimize at a certain level, or None to run
85 /// absolutely no optimizations (used for the metadata module).
86 pub opt_level: Option<config::OptLevel>,
88 /// Some(level) to optimize binary size, or None to not affect program size.
89 pub opt_size: Option<config::OptLevel>,
91 pub pgo_gen: SwitchWithOptPath,
92 pub pgo_use: Option<PathBuf>,
94 pub sanitizer: Option<Sanitizer>,
95 pub sanitizer_recover: Vec<Sanitizer>,
96 pub sanitizer_memory_track_origins: usize,
98 // Flags indicating which outputs to produce.
99 pub emit_pre_lto_bc: bool,
100 pub emit_no_opt_bc: bool,
104 pub emit_obj: EmitObj,
106 // Miscellaneous flags. These are mostly copied from command-line
108 pub verify_llvm_ir: bool,
109 pub no_prepopulate_passes: bool,
110 pub no_builtins: bool,
111 pub time_module: bool,
112 pub vectorize_loop: bool,
113 pub vectorize_slp: bool,
114 pub merge_functions: bool,
115 pub inline_threshold: Option<usize>,
116 pub new_llvm_pass_manager: bool,
124 is_compiler_builtins: bool,
126 // If it's a regular module, use `$regular`, otherwise use `$other`.
127 // `$regular` and `$other` are evaluated lazily.
128 macro_rules! if_regular {
129 ($regular: expr, $other: expr) => {
130 if let ModuleKind::Regular = kind { $regular } else { $other }
134 let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
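// (Illustrative note: the `if_regular!` call above expands to
// `if let ModuleKind::Regular = kind { Some(sess.opts.optimize) } else { None }`,
// so only regular modules get an optimization/size level.)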
136 let save_temps = sess.opts.cg.save_temps;
138 let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
140 ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
141 ModuleKind::Allocator => false,
142 ModuleKind::Metadata => sess.opts.output_types.contains_key(&OutputType::Metadata),
145 let emit_obj = if !should_emit_obj {
147 } else if sess.target.target.options.obj_is_bitcode
148 || sess.opts.cg.linker_plugin_lto.enabled()
151 } else if sess.opts.debugging_opts.embed_bitcode || need_crate_bitcode_for_rlib(sess) {
152 let force_full = need_crate_bitcode_for_rlib(sess);
153 match sess.opts.optimize {
154 config::OptLevel::No | config::OptLevel::Less if !force_full => {
155 EmitObj::ObjectCode(BitcodeSection::Marker)
157 _ => EmitObj::ObjectCode(BitcodeSection::Full),
160 EmitObj::ObjectCode(BitcodeSection::None)
166 let mut passes = sess.opts.cg.passes.clone();
167 // compiler_builtins overrides the codegen-units settings,
168 // which is incompatible with -Zprofile which requires that
169 // only a single codegen unit is used per crate.
170 if sess.opts.debugging_opts.profile && !is_compiler_builtins {
171 passes.push("insert-gcov-profiling".to_owned());
178 opt_level: opt_level_and_size,
179 opt_size: opt_level_and_size,
181 pgo_gen: if_regular!(
182 sess.opts.cg.profile_generate.clone(),
183 SwitchWithOptPath::Disabled
185 pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
187 sanitizer: if_regular!(sess.opts.debugging_opts.sanitizer.clone(), None),
188 sanitizer_recover: if_regular!(
189 sess.opts.debugging_opts.sanitizer_recover.clone(),
192 sanitizer_memory_track_origins: if_regular!(
193 sess.opts.debugging_opts.sanitizer_memory_track_origins,
197 emit_pre_lto_bc: if_regular!(
198 save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
201 emit_no_opt_bc: if_regular!(save_temps, false),
202 emit_bc: if_regular!(
203 save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
206 emit_ir: if_regular!(
207 sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
210 emit_asm: if_regular!(
211 sess.opts.output_types.contains_key(&OutputType::Assembly),
216 verify_llvm_ir: sess.verify_llvm_ir(),
217 no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
218 no_builtins: no_builtins || sess.target.target.options.no_builtins,
220 // Exclude metadata and allocator modules from time_passes output,
221 // since they throw off the "LLVM passes" measurement.
222 time_module: if_regular!(true, false),
224 // Copy what clang does by turning on loop vectorization at O2 and
225 // slp vectorization at O3.
226 vectorize_loop: !sess.opts.cg.no_vectorize_loops
227 && (sess.opts.optimize == config::OptLevel::Default
228 || sess.opts.optimize == config::OptLevel::Aggressive),
229 vectorize_slp: !sess.opts.cg.no_vectorize_slp
230 && sess.opts.optimize == config::OptLevel::Aggressive,
232 // Some targets (namely, NVPTX) interact badly with the
233 // MergeFunctions pass. This is because MergeFunctions can generate
234 // new function calls which may interfere with the target calling
235 // convention; e.g. for the NVPTX target, PTX kernels should not
236 // call other PTX kernels. MergeFunctions can also be configured to
237 // generate aliases instead, but aliases are not supported by some
238 // backends (again, NVPTX). Therefore, allow targets to opt out of
239 // the MergeFunctions pass, but otherwise keep the pass enabled (at
240 // O2 and O3) since it can be useful for reducing code size.
241 merge_functions: match sess
245 .unwrap_or(sess.target.target.options.merge_functions)
247 MergeFunctions::Disabled => false,
248 MergeFunctions::Trampolines | MergeFunctions::Aliases => {
249 sess.opts.optimize == config::OptLevel::Default
250 || sess.opts.optimize == config::OptLevel::Aggressive
254 inline_threshold: sess.opts.cg.inline_threshold,
255 new_llvm_pass_manager: sess.opts.debugging_opts.new_llvm_pass_manager,
259 pub fn bitcode_needed(&self) -> bool {
261 || self.emit_obj == EmitObj::Bitcode
262 || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
266 // HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`.
267 pub struct TargetMachineFactory<B: WriteBackendMethods>(
268 pub Arc<dyn Fn() -> Result<B::TargetMachine, String> + Send + Sync>,
271 impl<B: WriteBackendMethods> Clone for TargetMachineFactory<B> {
272 fn clone(&self) -> Self {
273 TargetMachineFactory(self.0.clone())
277 pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportLevel)>>>;
279 /// Additional resources used by optimize_and_codegen (not module specific)
281 pub struct CodegenContext<B: WriteBackendMethods> {
282 // Resources needed when running LTO
284 pub prof: SelfProfilerRef,
286 pub no_landing_pads: bool,
287 pub save_temps: bool,
288 pub fewer_names: bool,
289 pub exported_symbols: Option<Arc<ExportedSymbols>>,
290 pub opts: Arc<config::Options>,
291 pub crate_types: Vec<config::CrateType>,
292 pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
293 pub output_filenames: Arc<OutputFilenames>,
294 pub regular_module_config: Arc<ModuleConfig>,
295 pub metadata_module_config: Arc<ModuleConfig>,
296 pub allocator_module_config: Arc<ModuleConfig>,
297 pub tm_factory: TargetMachineFactory<B>,
298 pub msvc_imps_needed: bool,
299 pub target_pointer_width: String,
300 pub target_arch: String,
301 pub debuginfo: config::DebugInfo,
303 // Number of cgus excluding the allocator/metadata modules
304 pub total_cgus: usize,
305 // Handler to use for diagnostics produced during codegen.
306 pub diag_emitter: SharedEmitter,
307 // LLVM optimizations for which we want to print remarks.
309 // Worker thread number
311 // The incremental compilation session directory, or None if we are not
312 // compiling incrementally
313 pub incr_comp_session_dir: Option<PathBuf>,
314 // Used to update CGU re-use information during the thinlto phase.
315 pub cgu_reuse_tracker: CguReuseTracker,
316 // Channel back to the main control thread, used to send messages to it.
317 pub coordinator_send: Sender<Box<dyn Any + Send>>,
320 impl<B: WriteBackendMethods> CodegenContext<B> {
321 pub fn create_diag_handler(&self) -> Handler {
322 Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
325 pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
327 ModuleKind::Regular => &self.regular_module_config,
328 ModuleKind::Metadata => &self.metadata_module_config,
329 ModuleKind::Allocator => &self.allocator_module_config,
334 fn generate_lto_work<B: ExtraBackendMethods>(
335 cgcx: &CodegenContext<B>,
336 needs_fat_lto: Vec<FatLTOInput<B>>,
337 needs_thin_lto: Vec<(String, B::ThinBuffer)>,
338 import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
339 ) -> Vec<(WorkItem<B>, u64)> {
340 let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
342 let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
343 assert!(needs_thin_lto.is_empty());
345 B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
346 (vec![lto_module], vec![])
348 assert!(needs_fat_lto.is_empty());
349 B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
355 let cost = module.cost();
356 (WorkItem::LTO(module), cost)
358 .chain(copy_jobs.into_iter().map(|wp| {
360 WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
361 name: wp.cgu_name.clone(),
370 pub struct CompiledModules {
371 pub modules: Vec<CompiledModule>,
372 pub metadata_module: Option<CompiledModule>,
373 pub allocator_module: Option<CompiledModule>,
376 fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
377 sess.opts.cg.bitcode_in_rlib
378 && sess.crate_types.borrow().contains(&config::CrateType::Rlib)
379 && sess.opts.output_types.contains_key(&OutputType::Exe)
382 fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
383 if sess.opts.incremental.is_none() {
389 Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
393 pub fn start_async_codegen<B: ExtraBackendMethods>(
396 metadata: EncodedMetadata,
398 ) -> OngoingCodegen<B> {
399 let (coordinator_send, coordinator_receive) = channel();
402 let crate_name = tcx.crate_name(LOCAL_CRATE);
403 let crate_hash = tcx.crate_hash(LOCAL_CRATE);
404 let no_builtins = attr::contains_name(&tcx.hir().krate().item.attrs, sym::no_builtins);
405 let is_compiler_builtins =
406 attr::contains_name(&tcx.hir().krate().item.attrs, sym::compiler_builtins);
408 attr::first_attr_value_str_by_name(&tcx.hir().krate().item.attrs, sym::windows_subsystem);
409 let windows_subsystem = subsystem.map(|subsystem| {
410 if subsystem != sym::windows && subsystem != sym::console {
411 tcx.sess.fatal(&format!(
412 "invalid windows subsystem `{}`, only \
413 `windows` and `console` are allowed",
417 subsystem.to_string()
420 let linker_info = LinkerInfo::new(tcx);
421 let crate_info = CrateInfo::new(tcx);
424 ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
425 let metadata_config =
426 ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
427 let allocator_config =
428 ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
430 let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
431 let (codegen_worker_send, codegen_worker_receive) = channel();
433 let coordinator_thread = start_executing_work(
441 sess.jobserver.clone(),
442 Arc::new(regular_config),
443 Arc::new(metadata_config),
444 Arc::new(allocator_config),
445 coordinator_send.clone(),
458 codegen_worker_receive,
460 future: coordinator_thread,
461 output_filenames: tcx.output_filenames(LOCAL_CRATE),
465 fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
467 compiled_modules: &CompiledModules,
468 ) -> FxHashMap<WorkProductId, WorkProduct> {
469 let mut work_products = FxHashMap::default();
471 if sess.opts.incremental.is_none() {
472 return work_products;
475 let _timer = sess.timer("incr_comp_copy_cgu_workproducts");
477 for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
478 let mut files = vec![];
480 if let Some(ref path) = module.object {
481 files.push((WorkProductFileKind::Object, path.clone()));
483 if let Some(ref path) = module.bytecode {
484 files.push((WorkProductFileKind::Bytecode, path.clone()));
487 if let Some((id, product)) =
488 copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files)
490 work_products.insert(id, product);
497 fn produce_final_output_artifacts(
499 compiled_modules: &CompiledModules,
500 crate_output: &OutputFilenames,
502 let mut user_wants_bitcode = false;
503 let mut user_wants_objects = false;
505 // Produce final compile outputs.
506 let copy_gracefully = |from: &Path, to: &Path| {
507 if let Err(e) = fs::copy(from, to) {
508 sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
512 let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
513 if compiled_modules.modules.len() == 1 {
514 // 1) Only one codegen unit. In this case it's no difficulty
515 // to copy `foo.0.x` to `foo.x`.
516 let module_name = Some(&compiled_modules.modules[0].name[..]);
517 let path = crate_output.temp_path(output_type, module_name);
518 copy_gracefully(&path, &crate_output.path(output_type));
519 if !sess.opts.cg.save_temps && !keep_numbered {
520 // The user just wants `foo.x`, not `foo.#module-name#.x`.
524 let ext = crate_output
525 .temp_path(output_type, None)
532 if crate_output.outputs.contains_key(&output_type) {
533 // 2) Multiple codegen units, with `--emit foo=some_name`. We have
534 // no good solution for this case, so warn the user.
536 "ignoring emit path because multiple .{} files \
540 } else if crate_output.single_output_file.is_some() {
541 // 3) Multiple codegen units, with `-o some_name`. We have
542 // no good solution for this case, so warn the user.
544 "ignoring -o because multiple .{} files \
549 // 4) Multiple codegen units, but no explicit name. We
550 // just leave the `foo.0.x` files in place.
551 // (We don't have to do any work in this case.)
556 // Flag to indicate whether the user explicitly requested bitcode.
557 // Otherwise, we produced it only as a temporary output, and will need
559 for output_type in crate_output.outputs.keys() {
561 OutputType::Bitcode => {
562 user_wants_bitcode = true;
563 // Copy to .bc, but always keep the .0.bc. There is a later
564 // check to figure out if we should delete .0.bc files, or keep
565 // them for making an rlib.
566 copy_if_one_unit(OutputType::Bitcode, true);
568 OutputType::LlvmAssembly => {
569 copy_if_one_unit(OutputType::LlvmAssembly, false);
571 OutputType::Assembly => {
572 copy_if_one_unit(OutputType::Assembly, false);
574 OutputType::Object => {
575 user_wants_objects = true;
576 copy_if_one_unit(OutputType::Object, true);
578 OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
582 // Clean up unwanted temporary files.
584 // We create the following files by default:
585 // - #crate#.#module-name#.bc
586 // - #crate#.#module-name#.o
587 // - #crate#.crate.metadata.bc
588 // - #crate#.crate.metadata.o
589 // - #crate#.o (linked from crate.##.o)
590 // - #crate#.bc (copied from crate.##.bc)
591 // We may create additional files if requested by the user (through
592 // `-C save-temps` or `--emit=` flags).
594 if !sess.opts.cg.save_temps {
595 // Remove the temporary .#module-name#.o objects. If the user didn't
596 // explicitly request bitcode (with --emit=bc), and the bitcode is not
597 // needed for building an rlib, then we must remove .#module-name#.bc as
600 // Specific rules for keeping .#module-name#.bc:
601 // - If the user requested bitcode (`user_wants_bitcode`), and
602 // codegen_units > 1, then keep it.
603 // - If the user requested bitcode but codegen_units == 1, then we
604 // can toss .#module-name#.bc because we copied it to .bc earlier.
605 // - If we're not building an rlib and the user didn't request
606 // bitcode, then delete .#module-name#.bc.
607 // If you change how this works, also update back::link::link_rlib,
608 // where .#module-name#.bc files are (maybe) deleted after making an
610 let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
612 let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
614 let keep_numbered_objects =
615 needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);
617 for module in compiled_modules.modules.iter() {
618 if let Some(ref path) = module.object {
619 if !keep_numbered_objects {
624 if let Some(ref path) = module.bytecode {
625 if !keep_numbered_bitcode {
631 if !user_wants_bitcode {
632 if let Some(ref metadata_module) = compiled_modules.metadata_module {
633 if let Some(ref path) = metadata_module.bytecode {
638 if let Some(ref allocator_module) = compiled_modules.allocator_module {
639 if let Some(ref path) = allocator_module.bytecode {
646 // We leave the following files around by default:
648 // - #crate#.crate.metadata.o
650 // These are used in linking steps and will be cleaned up afterward.
653 pub fn dump_incremental_data(_codegen_results: &CodegenResults) {
654 // FIXME(mw): This does not work at the moment because the situation has
655 // become more complicated due to incremental LTO. Now a CGU
656 // can have more than two caching states.
657 // println!("[incremental] Re-using {} out of {} modules",
658 // codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
659 // codegen_results.modules.len());
662 pub enum WorkItem<B: WriteBackendMethods> {
663 /// Optimize a newly codegened, totally unoptimized module.
664 Optimize(ModuleCodegen<B::Module>),
665 /// Copy the post-LTO artifacts from the incremental cache to the output
667 CopyPostLtoArtifacts(CachedModuleCodegen),
668 /// Performs (Thin)LTO on the given module.
669 LTO(lto::LtoModuleCodegen<B>),
672 impl<B: WriteBackendMethods> WorkItem<B> {
673 pub fn module_kind(&self) -> ModuleKind {
675 WorkItem::Optimize(ref m) => m.kind,
676 WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
680 fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
682 WorkItem::Optimize(ref m) => {
683 cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &m.name[..])
685 WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
687 .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &m.name[..]),
688 WorkItem::LTO(ref m) => {
689 cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
695 enum WorkItemResult<B: WriteBackendMethods> {
696 Compiled(CompiledModule),
697 NeedsFatLTO(FatLTOInput<B>),
698 NeedsThinLTO(String, B::ThinBuffer),
701 pub enum FatLTOInput<B: WriteBackendMethods> {
702 Serialized { name: String, buffer: B::ModuleBuffer },
703 InMemory(ModuleCodegen<B::Module>),
706 fn execute_work_item<B: ExtraBackendMethods>(
707 cgcx: &CodegenContext<B>,
708 work_item: WorkItem<B>,
709 ) -> Result<WorkItemResult<B>, FatalError> {
710 let module_config = cgcx.config(work_item.module_kind());
713 WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
714 WorkItem::CopyPostLtoArtifacts(module) => {
715 execute_copy_from_cache_work_item(cgcx, module, module_config)
717 WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
721 // Actual LTO type we end up choosing based on multiple factors.
722 enum ComputedLtoType {
728 fn execute_optimize_work_item<B: ExtraBackendMethods>(
729 cgcx: &CodegenContext<B>,
730 module: ModuleCodegen<B::Module>,
731 module_config: &ModuleConfig,
732 ) -> Result<WorkItemResult<B>, FatalError> {
733 let diag_handler = cgcx.create_diag_handler();
736 B::optimize(cgcx, &diag_handler, &module, module_config)?;
739 // After we've done the initial round of optimizations we need to
740 // decide whether to synchronously codegen this module or ship it
741 // back to the coordinator thread for further LTO processing (which
742 // has to wait for all the initial modules to be optimized).
744 // If the linker does LTO, we don't have to do it. Note that we
745 // keep doing full LTO, if it is requested, so as not to break the
746 // assumption that the output will be a single module.
747 let linker_does_lto = cgcx.opts.cg.linker_plugin_lto.enabled();
749 // When we're automatically doing ThinLTO for multi-codegen-unit
750 // builds we don't actually want to LTO the allocator module if
751 // it shows up. This is due to various linker shenanigans that
752 // we'll encounter later.
753 let is_allocator = module.kind == ModuleKind::Allocator;
755 // We ignore a request for full crate graph LTO if the crate type
756 // is only an rlib, as there is no full crate graph to process;
757 // that'll happen later.
759 // This use case currently comes up primarily for targets that
760 // require LTO so the request for LTO is always unconditionally
761 // passed down to the backend, but we don't actually want to do
762 // anything about it yet until we've got a final product.
763 let is_rlib = cgcx.crate_types.len() == 1 && cgcx.crate_types[0] == config::CrateType::Rlib;
765 // Metadata modules never participate in LTO regardless of the lto
767 let lto_type = if module.kind == ModuleKind::Metadata {
771 Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
772 Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
773 Lto::Fat if !is_rlib => ComputedLtoType::Fat,
774 _ => ComputedLtoType::No,
778 // If we're doing some form of incremental LTO then we need to be sure to
779 // save our module to disk first.
780 let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
781 let filename = pre_lto_bitcode_filename(&module.name);
782 cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
788 ComputedLtoType::No => {
789 let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
790 WorkItemResult::Compiled(module)
792 ComputedLtoType::Thin => {
793 let (name, thin_buffer) = B::prepare_thin(module);
794 if let Some(path) = bitcode {
795 fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
796 panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
799 WorkItemResult::NeedsThinLTO(name, thin_buffer)
801 ComputedLtoType::Fat => match bitcode {
803 let (name, buffer) = B::serialize_module(module);
804 fs::write(&path, buffer.data()).unwrap_or_else(|e| {
805 panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
807 WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer })
809 None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)),
814 fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
815 cgcx: &CodegenContext<B>,
816 module: CachedModuleCodegen,
817 module_config: &ModuleConfig,
818 ) -> Result<WorkItemResult<B>, FatalError> {
819 let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
820 let mut object = None;
821 let mut bytecode = None;
822 for (kind, saved_file) in &module.source.saved_files {
823 let obj_out = match kind {
824 WorkProductFileKind::Object => {
825 let path = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
826 object = Some(path.clone());
829 WorkProductFileKind::Bytecode => {
830 let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(&module.name));
831 bytecode = Some(path.clone());
835 let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
837 "copying pre-existing module `{}` from {:?} to {}",
842 if let Err(err) = link_or_copy(&source_file, &obj_out) {
843 let diag_handler = cgcx.create_diag_handler();
844 diag_handler.err(&format!(
845 "unable to copy {} to {}: {}",
846 source_file.display(),
853 assert_eq!(object.is_some(), module_config.emit_obj != EmitObj::None);
854 assert_eq!(bytecode.is_some(), module_config.emit_bc);
856 Ok(WorkItemResult::Compiled(CompiledModule {
858 kind: ModuleKind::Regular,
864 fn execute_lto_work_item<B: ExtraBackendMethods>(
865 cgcx: &CodegenContext<B>,
866 mut module: lto::LtoModuleCodegen<B>,
867 module_config: &ModuleConfig,
868 ) -> Result<WorkItemResult<B>, FatalError> {
869 let diag_handler = cgcx.create_diag_handler();
872 let module = module.optimize(cgcx)?;
873 let module = B::codegen(cgcx, &diag_handler, module, module_config)?;
874 Ok(WorkItemResult::Compiled(module))
878 pub enum Message<B: WriteBackendMethods> {
879 Token(io::Result<Acquired>),
881 result: FatLTOInput<B>,
886 thin_buffer: B::ThinBuffer,
890 result: Result<CompiledModule, Option<WorkerFatalError>>,
894 llvm_work_item: WorkItem<B>,
897 AddImportOnlyModule {
898 module_data: SerializedModule<B::ModuleBuffer>,
899 work_product: WorkProduct,
908 code: Option<DiagnosticId>,
912 #[derive(PartialEq, Clone, Copy, Debug)]
913 enum MainThreadWorkerState {
919 fn start_executing_work<B: ExtraBackendMethods>(
922 crate_info: &CrateInfo,
923 shared_emitter: SharedEmitter,
924 codegen_worker_send: Sender<Message<B>>,
925 coordinator_receive: Receiver<Box<dyn Any + Send>>,
928 regular_config: Arc<ModuleConfig>,
929 metadata_config: Arc<ModuleConfig>,
930 allocator_config: Arc<ModuleConfig>,
931 tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
932 ) -> thread::JoinHandle<Result<CompiledModules, ()>> {
933 let coordinator_send = tx_to_llvm_workers;
936 // Compute the set of symbols we need to retain when doing LTO (if we need to)
937 let exported_symbols = {
938 let mut exported_symbols = FxHashMap::default();
940 let copy_symbols = |cnum| {
942 .exported_symbols(cnum)
944 .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
952 exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
953 Some(Arc::new(exported_symbols))
955 Lto::Fat | Lto::Thin => {
956 exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
957 for &cnum in tcx.crates().iter() {
958 exported_symbols.insert(cnum, copy_symbols(cnum));
960 Some(Arc::new(exported_symbols))
965 // First up, convert our jobserver into a helper thread so we can use normal
966 // mpsc channels to manage our messages and such.
967 // After we've requested tokens we'll, when they become available,
968 // receive them on `coordinator_receive`, where they get managed
969 // in the main loop below.
970 let coordinator_send2 = coordinator_send.clone();
971 let helper = jobserver
972 .into_helper_thread(move |token| {
973 drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
975 .expect("failed to spawn helper thread");
977 let mut each_linked_rlib_for_lto = Vec::new();
978 drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
979 if link::ignored_for_lto(sess, crate_info, cnum) {
982 each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
985 let ol = if tcx.sess.opts.debugging_opts.no_codegen
986 || !tcx.sess.opts.output_types.should_codegen()
988 // If we know that we won’t be doing codegen, create target machines without optimisation.
991 tcx.backend_optimization_level(LOCAL_CRATE)
993 let cgcx = CodegenContext::<B> {
994 backend: backend.clone(),
995 crate_types: sess.crate_types.borrow().clone(),
996 each_linked_rlib_for_lto,
998 no_landing_pads: sess.panic_strategy() == PanicStrategy::Abort,
999 fewer_names: sess.fewer_names(),
1000 save_temps: sess.opts.cg.save_temps,
1001 opts: Arc::new(sess.opts.clone()),
1002 prof: sess.prof.clone(),
1004 remark: sess.opts.cg.remark.clone(),
1006 incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
1007 cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
1009 diag_emitter: shared_emitter.clone(),
1010 output_filenames: tcx.output_filenames(LOCAL_CRATE),
1011 regular_module_config: regular_config,
1012 metadata_module_config: metadata_config,
1013 allocator_module_config: allocator_config,
1014 tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)),
1016 msvc_imps_needed: msvc_imps_needed(tcx),
1017 target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
1018 target_arch: tcx.sess.target.target.arch.clone(),
1019 debuginfo: tcx.sess.opts.debuginfo,
1022 // This is the "main loop" of parallel work happening for parallel codegen.
1023 // It's here that we manage parallelism, schedule work, and work with
1024 // messages coming from clients.
1026 // There are a few environmental pre-conditions that shape how the system
1029 // - Error reporting only can happen on the main thread because that's the
1030 // only place where we have access to the compiler `Session`.
1031 // - LLVM work can be done on any thread.
1032 // - Codegen can only happen on the main thread.
1033 // - Each thread doing substantial work must be in possession of a `Token`
1034 // from the `Jobserver`.
1035 // - The compiler process always holds one `Token`. Any additional `Tokens`
1036 // have to be requested from the `Jobserver`.
1040 // The error reporting restriction is handled separately from the rest: We
1041 // set up a `SharedEmitter` that holds an open channel to the main thread.
1042 // When an error occurs on any thread, the shared emitter will send the
1043 // error message to the receiver on the main thread (`SharedEmitterMain`). The
1044 // main thread will periodically query this error message queue and emit
1045 // any error messages it has received. It might even abort compilation if
1046 // it has received a fatal error. In this case we rely on all other threads
1047 // being torn down automatically with the main thread.
1048 // Since the main thread will often be busy doing codegen work, error
1049 // reporting will be somewhat delayed, as the message queue can only be
1050 // checked in between two work packages.
1052 // Work Processing Infrastructure
1053 // ==============================
1054 // The work processing infrastructure knows three major actors:
1056 // - the coordinator thread,
1057 // - the main thread, and
1058 // - LLVM worker threads
1060 // The coordinator thread is running a message loop. It instructs the main
1061 // thread about what work to do when, and it will spawn off LLVM worker
1062 // threads as open LLVM WorkItems become available.
1064 // The job of the main thread is to codegen CGUs into LLVM work packages
1065 // (since the main thread is the only thread that can do this). The main
1066 // thread will block until it receives a message from the coordinator, upon
1067 // which it will codegen one CGU, send it to the coordinator and block
1068 // again. This way the coordinator can control what the main thread is
1071 // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
1072 // available, it will spawn off a new LLVM worker thread and let it process
1073 // a WorkItem. When an LLVM worker thread is done with its WorkItem,
1074 // it will just shut down, which also frees all resources associated with
1075 // the given LLVM module, and sends a message to the coordinator that the
1076 // WorkItem has been completed.
1080 // The scheduler's goal is to minimize the time it takes to complete all
1081 // work there is; however, we also want to keep memory consumption low
1082 // if possible. These two goals are at odds with each other: If memory
1083 // consumption were not an issue, we could just let the main thread produce
1084 // LLVM WorkItems at full speed, assuring maximal utilization of
1085 // Tokens/LLVM worker threads. However, since codegen is usually faster
1086 // than LLVM processing, the queue of LLVM WorkItems would fill up and each
1087 // WorkItem potentially holds on to a substantial amount of memory.
1089 // So the actual goal is to always produce just enough LLVM WorkItems so as
1090 // not to starve our LLVM worker threads. That means, once we have enough
1091 // WorkItems in our queue, we can block the main thread, so it does not
1092 // produce more until we need them.
1094 // Doing LLVM Work on the Main Thread
1095 // ----------------------------------
1096 // Since the main thread owns the compiler process's implicit `Token`, it is
1097 // wasteful to keep it blocked without doing any work. Therefore, what we do
1098 // in this case is: We spawn off an additional LLVM worker thread that helps
1099 // reduce the queue. The work it is doing corresponds to the implicit
1100 // `Token`. The coordinator will mark the main thread as being busy with
1101 // LLVM work. (The actual work happens on another OS thread but we just care
1102 // about `Tokens`, not actual threads).
1104 // When any LLVM worker thread finishes while the main thread is marked as
1105 // "busy with LLVM work", we can do a little switcheroo: We give the Token
1106 // of the just finished thread to the LLVM worker thread that is working on
1107 // behalf of the main thread's implicit Token, thus freeing up the main
1108 // thread again. The coordinator can then again decide what the main thread
1109 // should do. This allows the coordinator to make decisions at more points
1112 // Striking a Balance between Throughput and Memory Consumption
1113 // ------------------------------------------------------------
1114 // Since our two goals, (1) use as many Tokens as possible and (2) keep
1115 // memory consumption as low as possible, are in conflict with each other,
1116 // we have to find a trade off between them. Right now, the goal is to keep
1117 // all workers busy, which means that no worker should find the queue empty
1118 // when it is ready to start.
1119 // How do we achieve this? Good question :) We actually never know how
1120 // many `Tokens` are potentially available so it's hard to say how much to
1121 // fill up the queue before switching the main thread to LLVM work. Also we
1122 // currently don't have a means to estimate how long a running LLVM worker
1123 // will still be busy with its current WorkItem. However, we know the
1124 // maximal count of available Tokens that makes sense (=the number of CPU
1125 // cores), so we can take a conservative guess. The heuristic we use here
1126 // is implemented in the `queue_full_enough()` function.
1128 // Some Background on Jobservers
1129 // -----------------------------
1130 // It's worth also touching on the management of parallelism here. We don't
1131 // want to just spawn a thread per work item because while that's optimal
1132 // parallelism it may overload a system with too many threads or violate our
1133 // configuration for the maximum amount of cpu to use for this process. To
1134 // manage this we use the `jobserver` crate.
1136 // Job servers are an artifact of GNU make and are used to manage
1137 // parallelism between processes. A jobserver is a glorified IPC semaphore
1138 // basically. Whenever we want to run some work we acquire the semaphore,
1139 // and whenever we're done with that work we release the semaphore. In this
1140 // manner we can ensure that the maximum number of parallel workers is
1141 // capped at any one point in time.
1143 // LTO and the coordinator thread
1144 // ------------------------------
1146 // The final job the coordinator thread is responsible for is managing LTO
1147 // and how that works. When LTO is requested what we'll do is collect all
1148 // optimized LLVM modules into a local vector on the coordinator. Once all
1149 // modules have been codegened and optimized we hand this to the `lto`
1150 // module for further optimization. The `lto` module will return back a list
1151 // of more modules to work on, which the coordinator will continue to spawn
1154 // Each LLVM module is automatically sent back to the coordinator for LTO if
1155 // necessary. There are already optimizations in place to avoid sending work
1156 // back to the coordinator if LTO isn't requested.
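//
// To make the above concrete, the message flow between the actors looks
// roughly like this (an illustrative sketch; the names are the `Message`
// variants and helpers defined elsewhere in this file):
//
//   coordinator --CodegenItem----------------------------> main thread (codegen one CGU)
//   main thread --CodegenDone { llvm_work_item, cost }----> coordinator (queue the item)
//   coordinator --spawn_work(item)------------------------> LLVM worker thread
//   LLVM worker --Done / NeedsFatLTO / NeedsThinLTO-------> coordinator
//   jobserver   --Token-----------------------------------> coordinator (more parallelism)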
1157 return thread::spawn(move || {
1158 let max_workers = ::num_cpus::get();
1159 let mut worker_id_counter = 0;
1160 let mut free_worker_ids = Vec::new();
1161 let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
1162 if let Some(id) = free_worker_ids.pop() {
1165 let id = worker_id_counter;
1166 worker_id_counter += 1;
1171 // This is where we collect codegen units that have gone all the way
1172 // through codegen and LLVM.
1173 let mut compiled_modules = vec![];
1174 let mut compiled_metadata_module = None;
1175 let mut compiled_allocator_module = None;
1176 let mut needs_fat_lto = Vec::new();
1177 let mut needs_thin_lto = Vec::new();
1178 let mut lto_import_only_modules = Vec::new();
1179 let mut started_lto = false;
1180 let mut codegen_aborted = false;
1182 // This flag tracks whether all items have gone through codegen
1183 let mut codegen_done = false;
1185 // This is the queue of LLVM work items that still need processing.
1186 let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
1188 // These are the Jobserver Tokens we currently hold. Does not include
1189 // the implicit Token the compiler process owns no matter what.
1190 let mut tokens = Vec::new();
1192 let mut main_thread_worker_state = MainThreadWorkerState::Idle;
1193 let mut running = 0;
1195 let prof = &cgcx.prof;
1196 let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
1198 // Run the message loop while there's still anything that needs message
1199 // processing. Note that as soon as codegen is aborted we simply want to
1200 // wait for all existing work to finish, so many of the conditions here
1201 // only apply if codegen hasn't been aborted as they represent pending
1205 || (!codegen_aborted
1206 && !(work_items.is_empty()
1207 && needs_fat_lto.is_empty()
1208 && needs_thin_lto.is_empty()
1209 && lto_import_only_modules.is_empty()
1210 && main_thread_worker_state == MainThreadWorkerState::Idle))
1212 // While there are still CGUs to be codegened, the coordinator has
1213 // to decide how to utilize the compiler process's implicit Token:
1214 // For codegenning more CGUs or for running them through LLVM.
1216 if main_thread_worker_state == MainThreadWorkerState::Idle {
1217 if !queue_full_enough(work_items.len(), running, max_workers) {
1218 // The queue is not full enough, codegen more items:
1219 if codegen_worker_send.send(Message::CodegenItem).is_err() {
1220 panic!("Could not send Message::CodegenItem to main thread")
1222 main_thread_worker_state = MainThreadWorkerState::Codegenning;
1224 // The queue is full enough to not let the worker
1225 // threads starve. Use the implicit Token to do some
1228 work_items.pop().expect("queue empty - queue_full_enough() broken?");
1229 let cgcx = CodegenContext {
1230 worker: get_worker_id(&mut free_worker_ids),
1233 maybe_start_llvm_timer(
1235 cgcx.config(item.module_kind()),
1236 &mut llvm_start_time,
1238 main_thread_worker_state = MainThreadWorkerState::LLVMing;
1239 spawn_work(cgcx, item);
1242 } else if codegen_aborted {
1243 // don't queue up any more work if codegen was aborted; we're
1244 // just waiting for our existing children to finish
1246 // If we've finished everything related to normal codegen
1247 // then it must be the case that we've got some LTO work to do.
1248 // Perform the serial work here of figuring out what we're
1249 // going to LTO and then push a bunch of work items onto our
1251 if work_items.is_empty()
1253 && main_thread_worker_state == MainThreadWorkerState::Idle
1255 assert!(!started_lto);
1258 let needs_fat_lto = mem::take(&mut needs_fat_lto);
1259 let needs_thin_lto = mem::take(&mut needs_thin_lto);
1260 let import_only_modules = mem::take(&mut lto_import_only_modules);
1263 generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
1265 let insertion_index = work_items
1266 .binary_search_by_key(&cost, |&(_, cost)| cost)
1267 .unwrap_or_else(|e| e);
1268 work_items.insert(insertion_index, (work, cost));
1269 if !cgcx.opts.debugging_opts.no_parallel_llvm {
1270 helper.request_token();
1275 // In this branch, we know that everything has been codegened,
1276 // so it's just a matter of determining whether the implicit
1277 // Token is free to use for LLVM work.
1278 match main_thread_worker_state {
1279 MainThreadWorkerState::Idle => {
1280 if let Some((item, _)) = work_items.pop() {
1281 let cgcx = CodegenContext {
1282 worker: get_worker_id(&mut free_worker_ids),
1285 maybe_start_llvm_timer(
1287 cgcx.config(item.module_kind()),
1288 &mut llvm_start_time,
1290 main_thread_worker_state = MainThreadWorkerState::LLVMing;
1291 spawn_work(cgcx, item);
1293 // There is no unstarted work, so let the main thread
1294 // take over for a running worker. Otherwise the
1295 // implicit token would just go to waste.
1296 // We reduce the `running` counter by one. The
1297 // `tokens.truncate()` below will take care of
1298 // giving the Token back.
1299 debug_assert!(running > 0);
1301 main_thread_worker_state = MainThreadWorkerState::LLVMing;
1304 MainThreadWorkerState::Codegenning => bug!(
1305 "codegen worker should not be codegenning after \
1306 codegen was already completed"
1308 MainThreadWorkerState::LLVMing => {
1309 // Already making good use of that token
1314 // Spin up what work we can, only doing this while we've got available
1315 // parallelism slots and work left to spawn.
1316 while !codegen_aborted && !work_items.is_empty() && running < tokens.len() {
1317 let (item, _) = work_items.pop().unwrap();
1319 maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
1322 CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
1324 spawn_work(cgcx, item);
1328 // Relinquish accidentally acquired extra tokens
1329 tokens.truncate(running);
1331 // If a thread exits successfully then we drop a token associated
1332 // with that worker and update our `running` count. We may later
1333 // re-acquire a token to continue running more work. We may also not
1334 // actually drop a token here if the worker was running with an
1335 // "ephemeral token"
1336 let mut free_worker = |worker_id| {
1337 if main_thread_worker_state == MainThreadWorkerState::LLVMing {
1338 main_thread_worker_state = MainThreadWorkerState::Idle;
1343 free_worker_ids.push(worker_id);
1346 let msg = coordinator_receive.recv().unwrap();
1347 match *msg.downcast::<Message<B>>().ok().unwrap() {
1348 // Save the token locally and the next turn of the loop will use
1349 // this to spawn a new unit of work, or it may get dropped
1350 // immediately if we have no more work to spawn.
1351 Message::Token(token) => {
1356 if main_thread_worker_state == MainThreadWorkerState::LLVMing {
1357 // If the main thread token is used for LLVM work
1358 // at the moment, we turn that thread into a regular
1359 // LLVM worker thread, so the main thread is free
1360 // to react to codegen demand.
1361 main_thread_worker_state = MainThreadWorkerState::Idle;
1366 let msg = &format!("failed to acquire jobserver token: {}", e);
1367 shared_emitter.fatal(msg);
1368 // Exit the coordinator thread
1374 Message::CodegenDone { llvm_work_item, cost } => {
1375 // We keep the queue sorted by estimated processing cost,
1376 // so that more expensive items are processed earlier. This
1377 // is good for throughput as it gives the main thread more
1378 // time to fill up the queue and it avoids scheduling
1379 // expensive items to the end.
1380 // Note, however, that this is not ideal for memory
1381 // consumption, as LLVM module sizes are not evenly
1383 let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
1384 let insertion_index = match insertion_index {
1385 Ok(idx) | Err(idx) => idx,
1387 work_items.insert(insertion_index, (llvm_work_item, cost));
1389 if !cgcx.opts.debugging_opts.no_parallel_llvm {
1390 helper.request_token();
1392 assert!(!codegen_aborted);
1393 assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
1394 main_thread_worker_state = MainThreadWorkerState::Idle;
1397 Message::CodegenComplete => {
1398 codegen_done = true;
1399 assert!(!codegen_aborted);
1400 assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
1401 main_thread_worker_state = MainThreadWorkerState::Idle;
1404 // If codegen is aborted that means translation was aborted due
1405 // to some normal-ish compiler error. In this situation we want
1406 // to exit as soon as possible, but we want to make sure all
1407 // existing work has finished. Flag codegen as being done, and
1408 // then conditions above will ensure no more work is spawned but
1409 // we'll keep executing this loop until `running` hits 0.
1410 Message::CodegenAborted => {
1411 assert!(!codegen_aborted);
1412 codegen_done = true;
1413 codegen_aborted = true;
1414 assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
1416 Message::Done { result: Ok(compiled_module), worker_id } => {
1417 free_worker(worker_id);
1418 match compiled_module.kind {
1419 ModuleKind::Regular => {
1420 compiled_modules.push(compiled_module);
1422 ModuleKind::Metadata => {
1423 assert!(compiled_metadata_module.is_none());
1424 compiled_metadata_module = Some(compiled_module);
1426 ModuleKind::Allocator => {
1427 assert!(compiled_allocator_module.is_none());
1428 compiled_allocator_module = Some(compiled_module);
1432 Message::NeedsFatLTO { result, worker_id } => {
1433 assert!(!started_lto);
1434 free_worker(worker_id);
1435 needs_fat_lto.push(result);
1437 Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
1438 assert!(!started_lto);
1439 free_worker(worker_id);
1440 needs_thin_lto.push((name, thin_buffer));
1442 Message::AddImportOnlyModule { module_data, work_product } => {
1443 assert!(!started_lto);
1444 assert!(!codegen_done);
1445 assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
1446 lto_import_only_modules.push((module_data, work_product));
1447 main_thread_worker_state = MainThreadWorkerState::Idle;
1449 // If the thread failed that means it panicked, so we abort immediately.
1450 Message::Done { result: Err(None), worker_id: _ } => {
1451 bug!("worker thread panicked");
1453 Message::Done { result: Err(Some(WorkerFatalError)), worker_id: _ } => {
1456 Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
1460 // Drop to print timings
1461 drop(llvm_start_time);
1463 // Regardless of what order these modules completed in, report them to
1464 // the backend in the same order every time to ensure that we're handing
1465 // out deterministic results.
1466 compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
1468 Ok(CompiledModules {
1469 modules: compiled_modules,
1470 metadata_module: compiled_metadata_module,
1471 allocator_module: compiled_allocator_module,
1475 // A heuristic that determines if we have enough LLVM WorkItems in the
1476 // queue so that the main thread can do LLVM work instead of codegen
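// As a worked example (hypothetical numbers): with max_workers = 8 and
// workers_running = 4, the queue counts as "full enough" once it holds at
// least 8 - 4/2 = 6 items, at which point the main thread switches from
// codegen to LLVM work.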
1477 fn queue_full_enough(
1478 items_in_queue: usize,
1479 workers_running: usize,
1483 items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
1486 fn maybe_start_llvm_timer<'a>(
1487 prof: &'a SelfProfilerRef,
1488 config: &ModuleConfig,
1489 llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
1491 if config.time_module && llvm_start_time.is_none() {
1492 *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes", "crate"));
1497 pub const CODEGEN_WORKER_ID: usize = usize::MAX;
1499 /// `FatalError` is explicitly not `Send`.
1501 pub struct WorkerFatalError;
1503 fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
1504 thread::spawn(move || {
1505 // Set up a destructor which will fire off a message that we're done as
1507 struct Bomb<B: ExtraBackendMethods> {
1508 coordinator_send: Sender<Box<dyn Any + Send>>,
1509 result: Option<Result<WorkItemResult<B>, FatalError>>,
1512 impl<B: ExtraBackendMethods> Drop for Bomb<B> {
1513 fn drop(&mut self) {
1514 let worker_id = self.worker_id;
1515 let msg = match self.result.take() {
1516 Some(Ok(WorkItemResult::Compiled(m))) => {
1517 Message::Done::<B> { result: Ok(m), worker_id }
1519 Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
1520 Message::NeedsFatLTO::<B> { result: m, worker_id }
1522 Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
1523 Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
1525 Some(Err(FatalError)) => {
1526 Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
1528 None => Message::Done::<B> { result: Err(None), worker_id },
1530 drop(self.coordinator_send.send(Box::new(msg)));
1534 let mut bomb = Bomb::<B> {
1535 coordinator_send: cgcx.coordinator_send.clone(),
1537 worker_id: cgcx.worker,
1540 // Execute the work itself, and if it finishes successfully then flag
1541 // ourselves as a success as well.
1543 // Note that we ignore any `FatalError` coming out of `execute_work_item`,
1544 // as a diagnostic was already sent off to the main thread - just
1545 // surface that there was an error in this worker.
1547 let _prof_timer = work.start_profiling(&cgcx);
1548 Some(execute_work_item(&cgcx, work))
1553 enum SharedEmitterMessage {
1554 Diagnostic(Diagnostic),
1555 InlineAsmError(u32, String),
1561 pub struct SharedEmitter {
1562 sender: Sender<SharedEmitterMessage>,
1565 pub struct SharedEmitterMain {
1566 receiver: Receiver<SharedEmitterMessage>,
1569 impl SharedEmitter {
1570 pub fn new() -> (SharedEmitter, SharedEmitterMain) {
1571 let (sender, receiver) = channel();
1573 (SharedEmitter { sender }, SharedEmitterMain { receiver })
1576 pub fn inline_asm_error(&self, cookie: u32, msg: String) {
1577 drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg)));
1580 pub fn fatal(&self, msg: &str) {
1581 drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
1585 impl Emitter for SharedEmitter {
1586 fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
1587 drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
1588 msg: diag.message(),
1589 code: diag.code.clone(),
1592 for child in &diag.children {
1593 drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
1594 msg: child.message(),
1599 drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
1601 fn source_map(&self) -> Option<&Lrc<SourceMap>> {
1606 impl SharedEmitterMain {
1607 pub fn check(&self, sess: &Session, blocking: bool) {
1609 let message = if blocking {
1610 match self.receiver.recv() {
1611 Ok(message) => Ok(message),
1615 match self.receiver.try_recv() {
1616 Ok(message) => Ok(message),
1622 Ok(SharedEmitterMessage::Diagnostic(diag)) => {
1623 let handler = sess.diagnostic();
1624 let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
1625 if let Some(code) = diag.code {
1628 handler.emit_diagnostic(&d);
1630 Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => {
1631 sess.span_err(ExpnId::from_u32(cookie).expn_data().call_site, &msg)
1633 Ok(SharedEmitterMessage::AbortIfErrors) => {
1634 sess.abort_if_errors();
1636 Ok(SharedEmitterMessage::Fatal(msg)) => {
1647 pub struct OngoingCodegen<B: ExtraBackendMethods> {
1649 pub crate_name: Symbol,
1650 pub crate_hash: Svh,
1651 pub metadata: EncodedMetadata,
1652 pub windows_subsystem: Option<String>,
1653 pub linker_info: LinkerInfo,
1654 pub crate_info: CrateInfo,
1655 pub coordinator_send: Sender<Box<dyn Any + Send>>,
1656 pub codegen_worker_receive: Receiver<Message<B>>,
1657 pub shared_emitter_main: SharedEmitterMain,
1658 pub future: thread::JoinHandle<Result<CompiledModules, ()>>,
1659 pub output_filenames: Arc<OutputFilenames>,
1662 impl<B: ExtraBackendMethods> OngoingCodegen<B> {
1663 pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
1664 let _timer = sess.timer("finish_ongoing_codegen");
1666 self.shared_emitter_main.check(sess, true);
1667 let future = self.future;
1668 let compiled_modules = sess.time("join_worker_thread", || match future.join() {
1669 Ok(Ok(compiled_modules)) => compiled_modules,
1671 sess.abort_if_errors();
1672 panic!("expected abort due to worker thread errors")
1675 bug!("panic during codegen/LLVM phase");
1679 sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());
1681 sess.abort_if_errors();
1684 copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
1685 produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
1687 // FIXME: time_llvm_passes support - does this use a global context or
1689 if sess.codegen_units() == 1 && sess.time_llvm_passes() {
1690 self.backend.print_pass_timings()
1695 crate_name: self.crate_name,
1696 crate_hash: self.crate_hash,
1697 metadata: self.metadata,
1698 windows_subsystem: self.windows_subsystem,
1699 linker_info: self.linker_info,
1700 crate_info: self.crate_info,
1702 modules: compiled_modules.modules,
1703 allocator_module: compiled_modules.allocator_module,
1704 metadata_module: compiled_modules.metadata_module,
1710 pub fn submit_pre_codegened_module_to_llvm(
1713 module: ModuleCodegen<B::Module>,
1715 self.wait_for_signal_to_codegen_item();
1716 self.check_for_errors(tcx.sess);
1718 // These are generally cheap and won't throw off scheduling.
1720 submit_codegened_module_to_llvm(&self.backend, &self.coordinator_send, module, cost);
1723 pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
1724 self.wait_for_signal_to_codegen_item();
1725 self.check_for_errors(tcx.sess);
1726 drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::<B>)));
1729 /// Consumes this context indicating that codegen was entirely aborted, and
1730 /// we need to exit as quickly as possible.
1732 /// This method blocks the current thread until all worker threads have
1733 /// finished, and all worker threads should have exited or be real close to
1734 /// exiting at this point.
1735 pub fn codegen_aborted(self) {
1736 // Signal to the coordinator it should spawn no more work and start
1738 drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::<B>)));
1739 drop(self.future.join());
1742 pub fn check_for_errors(&self, sess: &Session) {
1743 self.shared_emitter_main.check(sess, false);
1746 pub fn wait_for_signal_to_codegen_item(&self) {
1747 match self.codegen_worker_receive.recv() {
1748 Ok(Message::CodegenItem) => {
1751 Ok(_) => panic!("unexpected message"),
1753 // One of the LLVM threads must have panicked; fall through so
1754 // error handling can be reached.
1760 pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
1762 tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
1763 module: ModuleCodegen<B::Module>,
1766 let llvm_work_item = WorkItem::Optimize(module);
1767 drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
1770 pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
1772 tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
1773 module: CachedModuleCodegen,
1775 let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
1776 drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
1779 pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
1782 tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
1783 module: CachedModuleCodegen,
1785 let filename = pre_lto_bitcode_filename(&module.name);
1786 let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
1787 let file = fs::File::open(&bc_path)
1788 .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
1791 memmap::Mmap::map(&file).unwrap_or_else(|e| {
1792 panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
1795 // Schedule the module to be loaded
1796 drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
1797 module_data: SerializedModule::FromUncompressedFile(mmap),
1798 work_product: module.source,
1802 pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
1803 format!("{}.{}", module_name, PRE_LTO_BC_EXT)
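    // E.g. (illustrative), a module named "mycrate.0" yields "mycrate.0.pre-lto.bc",
    // since PRE_LTO_BC_EXT is "pre-lto.bc" (defined near the top of this file).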
1806 fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
1807 // This should never be true (because it's not supported). If it is true,
1808 // something is wrong with commandline arg validation.
1810 !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
1811 && tcx.sess.target.target.options.is_like_msvc
1812 && tcx.sess.opts.cg.prefer_dynamic)
1815 tcx.sess.target.target.options.is_like_msvc &&
1816 tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) &&
1817 // ThinLTO can't handle this workaround in all cases, so we don't
1818 // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
1819 // dynamic linking when linker plugin LTO is enabled.
1820 !tcx.sess.opts.cg.linker_plugin_lto.enabled()