use super::command::Command;
use super::link::{self, get_linker, remove};
use super::linker::LinkerInfo;
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;

use crate::{
    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
    RLIB_BYTECODE_EXTENSION,
};

use jobserver::{Acquired, Client};
use rustc::dep_graph::{WorkProduct, WorkProductFileKind, WorkProductId};
use rustc::middle::cstore::EncodedMetadata;
use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::session::config::{
    self, Lto, OutputFilenames, OutputType, Passes, Sanitizer, SwitchWithOptPath,
};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::profiling::VerboseTimingGuard;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
use rustc_errors::emitter::Emitter;
use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
    copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_session::cgu_reuse_tracker::CguReuseTracker;
use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::spec::MergeFunctions;

use std::any::Any;
use std::fs;
use std::io;
use std::mem;
use std::path::{Path, PathBuf};
use std::str;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread;

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// Module-specific configuration for `optimize_and_codegen`.
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the metadata module).
    pub opt_level: Option<config::OptLevel>,

    /// Some(level) to optimize binary size, or None to not affect program size.
    pub opt_size: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,

    pub sanitizer: Option<Sanitizer>,
    pub sanitizer_recover: Vec<Sanitizer>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_no_opt_bc: bool,
    pub emit_bc: bool,
    pub emit_bc_compressed: bool,
    pub emit_lto_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub time_module: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub inline_threshold: Option<usize>,
    // Instead of creating an object file by doing LLVM codegen, just
    // make the object file bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    pub obj_is_bitcode: bool,
    pub no_integrated_as: bool,
    pub embed_bitcode: bool,
    pub embed_bitcode_marker: bool,
}

impl ModuleConfig {
    fn new(passes: Vec<String>) -> ModuleConfig {
        ModuleConfig {
            passes,
            opt_level: None,
            opt_size: None,
            pgo_gen: SwitchWithOptPath::Disabled,
            pgo_use: None,
            sanitizer: None,
            sanitizer_recover: Default::default(),
            sanitizer_memory_track_origins: 0,
            emit_no_opt_bc: false,
            emit_pre_lto_bc: false,
            emit_bc: false,
            emit_bc_compressed: false,
            emit_lto_bc: false,
            emit_ir: false,
            emit_asm: false,
            emit_obj: false,
            obj_is_bitcode: false,
            embed_bitcode: false,
            embed_bitcode_marker: false,
            no_integrated_as: false,
            verify_llvm_ir: false,
            no_prepopulate_passes: false,
            no_builtins: false,
            time_module: true,
            vectorize_loop: false,
            vectorize_slp: false,
            merge_functions: false,
            inline_threshold: None,
        }
    }

    fn set_flags(&mut self, sess: &Session, no_builtins: bool) {
        self.verify_llvm_ir = sess.verify_llvm_ir();
        self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
        self.no_builtins = no_builtins || sess.target.target.options.no_builtins;
        self.inline_threshold = sess.opts.cg.inline_threshold;
        self.obj_is_bitcode =
            sess.target.target.options.obj_is_bitcode || sess.opts.cg.linker_plugin_lto.enabled();
        let embed_bitcode =
            sess.target.target.options.embed_bitcode || sess.opts.debugging_opts.embed_bitcode;

        match sess.opts.optimize {
            config::OptLevel::No | config::OptLevel::Less => {
                self.embed_bitcode_marker = embed_bitcode;
            }
            _ => self.embed_bitcode = embed_bitcode,
        }

        // Copy what clang does by turning on loop vectorization at O2 and
        // slp vectorization at O3. Otherwise configure other optimization aspects
        // of this pass manager builder.
        self.vectorize_loop = !sess.opts.cg.no_vectorize_loops
            && (sess.opts.optimize == config::OptLevel::Default
                || sess.opts.optimize == config::OptLevel::Aggressive);
        self.vectorize_slp =
            !sess.opts.cg.no_vectorize_slp && sess.opts.optimize == config::OptLevel::Aggressive;

        // Some targets (namely, NVPTX) interact badly with the MergeFunctions
        // pass. This is because MergeFunctions can generate new function calls
        // which may interfere with the target calling convention; e.g. for the
        // NVPTX target, PTX kernels should not call other PTX kernels.
        // MergeFunctions can also be configured to generate aliases instead,
        // but aliases are not supported by some backends (again, NVPTX).
        // Therefore, allow targets to opt out of the MergeFunctions pass,
        // but otherwise keep the pass enabled (at O2 and O3) since it can be
        // useful for reducing code size.
        self.merge_functions = match sess
            .opts
            .debugging_opts
            .merge_functions
            .unwrap_or(sess.target.target.options.merge_functions)
        {
            MergeFunctions::Disabled => false,
            MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                sess.opts.optimize == config::OptLevel::Default
                    || sess.opts.optimize == config::OptLevel::Aggressive
            }
        };
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc || self.obj_is_bitcode || self.emit_bc_compressed || self.embed_bitcode
    }
}

/// Assembler name and command used by codegen when no_integrated_as is enabled.
pub struct AssemblerCommand {
    name: PathBuf,
    cmd: Command,
}

// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`.
pub struct TargetMachineFactory<B: WriteBackendMethods>(
    pub Arc<dyn Fn() -> Result<B::TargetMachine, String> + Send + Sync>,
);

impl<B: WriteBackendMethods> Clone for TargetMachineFactory<B> {
    fn clone(&self) -> Self {
        TargetMachineFactory(self.0.clone())
    }
}

pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportLevel)>>>;

/// Additional resources used by optimize_and_codegen (not module specific).
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
    // Resources needed when running LTO
    pub backend: B,
    pub prof: SelfProfilerRef,
    pub lto: Lto,
    pub no_landing_pads: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub exported_symbols: Option<Arc<ExportedSymbols>>,
    pub opts: Arc<config::Options>,
    pub crate_types: Vec<config::CrateType>,
    pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
    pub output_filenames: Arc<OutputFilenames>,
    pub regular_module_config: Arc<ModuleConfig>,
    pub metadata_module_config: Arc<ModuleConfig>,
    pub allocator_module_config: Arc<ModuleConfig>,
    pub tm_factory: TargetMachineFactory<B>,
    pub msvc_imps_needed: bool,
    pub target_pointer_width: String,
    pub target_arch: String,
    pub debuginfo: config::DebugInfo,

    // Number of cgus excluding the allocator/metadata modules
    pub total_cgus: usize,
    // Handler to use for diagnostics produced during codegen.
    pub diag_emitter: SharedEmitter,
    // LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    // Worker thread number
    pub worker: usize,
    // The incremental compilation session directory, or None if we are not
    // compiling incrementally
    pub incr_comp_session_dir: Option<PathBuf>,
    // Used to update CGU re-use information during the thinlto phase.
    pub cgu_reuse_tracker: CguReuseTracker,
    // Channel back to the main control thread to send messages to
    pub coordinator_send: Sender<Box<dyn Any + Send>>,
    // The assembler command if no_integrated_as option is enabled, None otherwise
    pub assembler_cmd: Option<Arc<AssemblerCommand>>,
}

impl<B: WriteBackendMethods> CodegenContext<B> {
    pub fn create_diag_handler(&self) -> Handler {
        Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
    }

    pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
        match kind {
            ModuleKind::Regular => &self.regular_module_config,
            ModuleKind::Metadata => &self.metadata_module_config,
            ModuleKind::Allocator => &self.allocator_module_config,
        }
    }
}

fn generate_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    needs_fat_lto: Vec<FatLTOInput<B>>,
    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
    let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");

    let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
        assert!(needs_thin_lto.is_empty());
        let lto_module =
            B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
        (vec![lto_module], vec![])
    } else {
        assert!(needs_fat_lto.is_empty());
        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
    };

    let result = lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (WorkItem::LTO(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0,
            )
        }))
        .collect();

    result
}

pub struct CompiledModules {
    pub modules: Vec<CompiledModule>,
    pub metadata_module: Option<CompiledModule>,
    pub allocator_module: Option<CompiledModule>,
}

fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
    sess.crate_types.borrow().contains(&config::CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}

pub fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    metadata: EncodedMetadata,
    total_cgus: usize,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();
    let sess = tcx.sess;

    let crate_name = tcx.crate_name(LOCAL_CRATE);
    let crate_hash = tcx.crate_hash(LOCAL_CRATE);
    let no_builtins = attr::contains_name(&tcx.hir().krate().attrs, sym::no_builtins);
    let subsystem =
        attr::first_attr_value_str_by_name(&tcx.hir().krate().attrs, sym::windows_subsystem);
    let windows_subsystem = subsystem.map(|subsystem| {
        if subsystem != sym::windows && subsystem != sym::console {
            tcx.sess.fatal(&format!(
                "invalid windows subsystem `{}`, only \
                 `windows` and `console` are allowed",
                subsystem
            ));
        }
        subsystem.to_string()
    });

    let linker_info = LinkerInfo::new(tcx);
    let crate_info = CrateInfo::new(tcx);

    // Figure out what we actually need to build.
    let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone());
    let mut metadata_config = ModuleConfig::new(vec![]);
    let mut allocator_config = ModuleConfig::new(vec![]);

    if sess.opts.debugging_opts.profile {
        modules_config.passes.push("insert-gcov-profiling".to_owned())
    }

    modules_config.pgo_gen = sess.opts.cg.profile_generate.clone();
    modules_config.pgo_use = sess.opts.cg.profile_use.clone();
    modules_config.sanitizer = sess.opts.debugging_opts.sanitizer.clone();
    modules_config.sanitizer_recover = sess.opts.debugging_opts.sanitizer_recover.clone();
    modules_config.sanitizer_memory_track_origins =
        sess.opts.debugging_opts.sanitizer_memory_track_origins;
    modules_config.opt_level = Some(sess.opts.optimize);
    modules_config.opt_size = Some(sess.opts.optimize);

    // Save all versions of the bytecode if we're saving our temporaries.
    if sess.opts.cg.save_temps {
        modules_config.emit_no_opt_bc = true;
        modules_config.emit_pre_lto_bc = true;
        modules_config.emit_bc = true;
        modules_config.emit_lto_bc = true;
        metadata_config.emit_bc = true;
        allocator_config.emit_bc = true;
    }

    // Emit compressed bitcode files for the crate if we're emitting an rlib.
    // Whenever an rlib is created, the bitcode is inserted into the archive in
    // order to allow LTO against it.
    if need_crate_bitcode_for_rlib(sess) {
        modules_config.emit_bc_compressed = true;
        allocator_config.emit_bc_compressed = true;
    }

    modules_config.emit_pre_lto_bc = need_pre_lto_bitcode_for_incr_comp(sess);

    modules_config.no_integrated_as =
        tcx.sess.opts.cg.no_integrated_as || tcx.sess.target.target.options.no_integrated_as;

    for output_type in sess.opts.output_types.keys() {
        match *output_type {
            OutputType::Bitcode => {
                modules_config.emit_bc = true;
            }
            OutputType::LlvmAssembly => {
                modules_config.emit_ir = true;
            }
            OutputType::Assembly => {
                modules_config.emit_asm = true;
                // If we're not using the LLVM assembler, this function
                // could be invoked specially with output_type_assembly, so
                // in this case we still want the metadata object file.
                if !sess.opts.output_types.contains_key(&OutputType::Assembly) {
                    metadata_config.emit_obj = true;
                    allocator_config.emit_obj = true;
                }
            }
            OutputType::Object => {
                modules_config.emit_obj = true;
            }
            OutputType::Metadata => {
                metadata_config.emit_obj = true;
            }
            OutputType::Exe => {
                modules_config.emit_obj = true;
                metadata_config.emit_obj = true;
                allocator_config.emit_obj = true;
            }
            OutputType::Mir => {}
            OutputType::DepInfo => {}
        }
    }

    modules_config.set_flags(sess, no_builtins);
    metadata_config.set_flags(sess, no_builtins);
    allocator_config.set_flags(sess, no_builtins);

    // Exclude metadata and allocator modules from time_passes output, since
    // they throw off the "LLVM passes" measurement.
    metadata_config.time_module = false;
    allocator_config.time_module = false;

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        &crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        total_cgus,
        sess.jobserver.clone(),
        Arc::new(modules_config),
        Arc::new(metadata_config),
        Arc::new(allocator_config),
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,
        crate_name,
        crate_hash,
        metadata,
        windows_subsystem,
        linker_info,
        crate_info,

        coordinator_send,
        codegen_worker_receive,
        shared_emitter_main,
        future: coordinator_thread,
        output_filenames: tcx.output_filenames(LOCAL_CRATE),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxHashMap<WorkProductId, WorkProduct> {
    let mut work_products = FxHashMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("incr_comp_copy_cgu_workproducts");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = vec![];

        if let Some(ref path) = module.object {
            files.push((WorkProductFileKind::Object, path.clone()));
        }
        if let Some(ref path) = module.bytecode {
            files.push((WorkProductFileKind::Bytecode, path.clone()));
        }
        if let Some(ref path) = module.bytecode_compressed {
            files.push((WorkProductFileKind::BytecodeCompressed, path.clone()));
        }

        if let Some((id, product)) =
            copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files)
        {
            work_products.insert(id, product);
        }
    }

    work_products
}

fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &Path| {
        if let Err(e) = fs::copy(from, to) {
            sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
        }
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if compiled_modules.modules.len() == 1 {
            // 1) Only one codegen unit. In this case it's straightforward
            //    to copy `foo.0.x` to `foo.x`.
            let module_name = Some(&compiled_modules.modules[0].name[..]);
            let path = crate_output.temp_path(output_type, module_name);
            copy_gracefully(&path, &crate_output.path(output_type));
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                remove(sess, &path);
            }
        } else {
            let ext = crate_output
                .temp_path(output_type, None)
                .extension()
                .unwrap()
                .to_str()
                .unwrap()
                .to_owned();

            if crate_output.outputs.contains_key(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.warn(&format!(
                    "ignoring emit path because multiple .{} files \
                     were produced",
                    ext
                ));
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.warn(&format!(
                    "ignoring -o because multiple .{} files \
                     were produced",
                    ext
                ));
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                //    (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.bc
    //  - #crate#.#module-name#.o
    //  - #crate#.crate.metadata.bc
    //  - #crate#.crate.metadata.o
    //  - #crate#.o (linked from crate.##.o)
    //  - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);

        for module in compiled_modules.modules.iter() {
            if let Some(ref path) = module.object {
                if !keep_numbered_objects {
                    remove(sess, path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    remove(sess, path);
                }
            }
        }

        if !user_wants_bitcode {
            if let Some(ref metadata_module) = compiled_modules.metadata_module {
                if let Some(ref path) = metadata_module.bytecode {
                    remove(sess, path);
                }
            }

            if let Some(ref allocator_module) = compiled_modules.allocator_module {
                if let Some(ref path) = allocator_module.bytecode {
                    remove(sess, path);
                }
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.crate.metadata.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub fn dump_incremental_data(_codegen_results: &CodegenResults) {
    // FIXME(mw): This does not work at the moment because the situation has
    //            become more complicated due to incremental LTO. Now a CGU
    //            can have more than two caching states.
    // println!("[incremental] Re-using {} out of {} modules",
    //           codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
    //           codegen_results.modules.len());
}

pub enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs (Thin)LTO on the given module.
    LTO(lto::LtoModuleCodegen<B>),
}

impl<B: WriteBackendMethods> WorkItem<B> {
    pub fn module_kind(&self) -> ModuleKind {
        match *self {
            WorkItem::Optimize(ref m) => m.kind,
            WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
        }
    }

    fn profiling_event_id(&self) -> &'static str {
        match *self {
            WorkItem::Optimize(_) => "codegen_module_optimize",
            WorkItem::CopyPostLtoArtifacts(_) => "codegen_copy_artifacts_from_incr_cache",
            WorkItem::LTO(_) => "codegen_module_perform_lto",
        }
    }
}

enum WorkItemResult<B: WriteBackendMethods> {
    Compiled(CompiledModule),
    NeedsFatLTO(FatLTOInput<B>),
    NeedsThinLTO(String, B::ThinBuffer),
}

pub enum FatLTOInput<B: WriteBackendMethods> {
    Serialized { name: String, buffer: B::ModuleBuffer },
    InMemory(ModuleCodegen<B::Module>),
}

fn execute_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    work_item: WorkItem<B>,
) -> Result<WorkItemResult<B>, FatalError> {
    let module_config = cgcx.config(work_item.module_kind());

    match work_item {
        WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
        WorkItem::CopyPostLtoArtifacts(module) => {
            execute_copy_from_cache_work_item(cgcx, module, module_config)
        }
        WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
    }
}

// Actual LTO type we end up choosing based on multiple factors.
enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

fn execute_optimize_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: ModuleCodegen<B::Module>,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    unsafe {
        B::optimize(cgcx, &diag_handler, &module, module_config)?;
    }

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).
    //
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, so as not to break the
    // assumption that the output will be a single module.
    let linker_does_lto = cgcx.opts.cg.linker_plugin_lto.enabled();

    // When we're automatically doing ThinLTO for multi-codegen-unit
    // builds we don't actually want to LTO the allocator module if
    // it shows up. This is due to various linker shenanigans that
    // we'll encounter later.
    let is_allocator = module.kind == ModuleKind::Allocator;

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process,
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = cgcx.crate_types.len() == 1 && cgcx.crate_types[0] == config::CrateType::Rlib;

    // Metadata modules never participate in LTO regardless of the lto
    // settings.
    let lto_type = if module.kind == ModuleKind::Metadata {
        ComputedLtoType::No
    } else {
        match cgcx.lto {
            Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
            Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
            Lto::Fat if !is_rlib => ComputedLtoType::Fat,
            _ => ComputedLtoType::No,
        }
    };

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    Ok(match lto_type {
        ComputedLtoType::No => {
            let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
            WorkItemResult::Compiled(module)
        }
        ComputedLtoType::Thin => {
            let (name, thin_buffer) = B::prepare_thin(module);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLTO(name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let (name, buffer) = B::serialize_module(module);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer })
            }
            None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)),
        },
    })
}

fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: CachedModuleCodegen,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
    let mut object = None;
    let mut bytecode = None;
    let mut bytecode_compressed = None;
    for (kind, saved_file) in &module.source.saved_files {
        let obj_out = match kind {
            WorkProductFileKind::Object => {
                let path = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
                object = Some(path.clone());
                path
            }
            WorkProductFileKind::Bytecode => {
                let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(&module.name));
                bytecode = Some(path.clone());
                path
            }
            WorkProductFileKind::BytecodeCompressed => {
                let path = cgcx
                    .output_filenames
                    .temp_path(OutputType::Bitcode, Some(&module.name))
                    .with_extension(RLIB_BYTECODE_EXTENSION);
                bytecode_compressed = Some(path.clone());
                path
            }
        };
        let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
        debug!(
            "copying pre-existing module `{}` from {:?} to {}",
            module.name,
            source_file,
            obj_out.display()
        );
        if let Err(err) = link_or_copy(&source_file, &obj_out) {
            let diag_handler = cgcx.create_diag_handler();
            diag_handler.err(&format!(
                "unable to copy {} to {}: {}",
                source_file.display(),
                obj_out.display(),
                err
            ));
        }
    }

    assert_eq!(object.is_some(), module_config.emit_obj);
    assert_eq!(bytecode.is_some(), module_config.emit_bc);
    assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed);

    Ok(WorkItemResult::Compiled(CompiledModule {
        name: module.name,
        kind: ModuleKind::Regular,
        object,
        bytecode,
        bytecode_compressed,
    }))
}

fn execute_lto_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    mut module: lto::LtoModuleCodegen<B>,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    unsafe {
        let module = module.optimize(cgcx)?;
        let module = B::codegen(cgcx, &diag_handler, module, module_config)?;
        Ok(WorkItemResult::Compiled(module))
    }
}

pub enum Message<B: WriteBackendMethods> {
    Token(io::Result<Acquired>),
    NeedsFatLTO {
        result: FatLTOInput<B>,
        worker_id: usize,
    },
    NeedsThinLTO {
        name: String,
        thin_buffer: B::ThinBuffer,
        worker_id: usize,
    },
    Done {
        result: Result<CompiledModule, Option<WorkerFatalError>>,
        worker_id: usize,
    },
    CodegenDone {
        llvm_work_item: WorkItem<B>,
        cost: u64,
    },
    AddImportOnlyModule {
        module_data: SerializedModule<B::ModuleBuffer>,
        work_product: WorkProduct,
    },
    CodegenComplete,
    CodegenAborted,
    CodegenItem,
}

struct Diagnostic {
    msg: String,
    code: Option<DiagnosticId>,
    lvl: Level,
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadWorkerState {
    Idle,
    Codegenning,
    LLVMing,
}

fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<Message<B>>,
    coordinator_receive: Receiver<Box<dyn Any + Send>>,
    total_cgus: usize,
    jobserver: Client,
    modules_config: Arc<ModuleConfig>,
    metadata_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
    let coordinator_send = tx_to_llvm_workers;
    let sess = tcx.sess;

    // Compute the set of symbols we need to retain when doing LTO (if we need to)
    let exported_symbols = {
        let mut exported_symbols = FxHashMap::default();

        let copy_symbols = |cnum| {
            let symbols = tcx
                .exported_symbols(cnum)
                .iter()
                .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
                .collect();
            Arc::new(symbols)
        };

        match sess.lto() {
            Lto::No => None,
            Lto::ThinLocal => {
                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
                Some(Arc::new(exported_symbols))
            }
            Lto::Fat | Lto::Thin => {
                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
                for &cnum in tcx.crates().iter() {
                    exported_symbols.insert(cnum, copy_symbols(cnum));
                }
                Some(Arc::new(exported_symbols))
            }
        }
    };

    // First up, convert our jobserver into a helper thread so we can use normal
    // mpsc channels to manage our messages and such.
    // Once we've requested tokens, we'll receive them on `coordinator_receive`,
    // and they'll be managed in the main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
        })
        .expect("failed to spawn helper thread");

    let mut each_linked_rlib_for_lto = Vec::new();
    drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
        if link::ignored_for_lto(sess, crate_info, cnum) {
            return;
        }
        each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
    }));

    let assembler_cmd = if modules_config.no_integrated_as {
        // HACK: currently we use the linker (gcc) as our assembler.
        let (linker, flavor) = link::linker_and_flavor(sess);

        let (name, mut cmd) = get_linker(sess, &linker, flavor);
        cmd.args(&sess.target.target.options.asm_args);
        Some(Arc::new(AssemblerCommand { name, cmd }))
    } else {
        None
    };

    let ol = if tcx.sess.opts.debugging_opts.no_codegen
        || !tcx.sess.opts.output_types.should_codegen()
    {
        // If we know that we won’t be doing codegen, create target machines without optimisation.
        config::OptLevel::No
    } else {
        tcx.backend_optimization_level(LOCAL_CRATE)
    };
    let cgcx = CodegenContext::<B> {
        backend: backend.clone(),
        crate_types: sess.crate_types.borrow().clone(),
        each_linked_rlib_for_lto,
        lto: sess.lto(),
        no_landing_pads: sess.no_landing_pads(),
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        opts: Arc::new(sess.opts.clone()),
        prof: sess.prof.clone(),
        exported_symbols,
        remark: sess.opts.cg.remark.clone(),
        worker: 0,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
        coordinator_send,
        diag_emitter: shared_emitter.clone(),
        output_filenames: tcx.output_filenames(LOCAL_CRATE),
        regular_module_config: modules_config,
        metadata_module_config: metadata_config,
        allocator_module_config: allocator_config,
        tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol, false)),
        total_cgus,
        msvc_imps_needed: msvc_imps_needed(tcx),
        target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
        target_arch: tcx.sess.target.target.arch.clone(),
        debuginfo: tcx.sess.opts.debuginfo,
        assembler_cmd,
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver main thread (`SharedEmitterMain`). The
    // main thread will periodically query this error message queue and emit
    // any error messages it has received. It might even abort compilation if
    // it has received a fatal error. In this case we rely on all other threads
    // being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
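    //
    // As a rough sketch (illustrative only; the message names are variants of
    // the `Message` enum defined above), the flow between these actors is:
    //
    //     coordinator --- Message::CodegenItem ---------------------> main thread
    //     main thread --- Message::CodegenDone { llvm_work_item, cost } --> coordinator
    //     coordinator --- spawn_work(item) -------------------------> LLVM worker
    //     LLVM worker --- Message::Done { result, .. } -------------> coordinator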
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
    // that WorkItem. When an LLVM worker thread is done with its WorkItem,
    // it will just shut down, which also frees all resources associated with
    // the given LLVM module, and sends a message to the coordinator that the
    // WorkItem has been completed.
    //
    // Work Scheduling
    // ===============
    // The scheduler's goal is to minimize the time it takes to complete all
    // work there is. However, we also want to keep memory consumption low
    // if possible. These two goals are at odds with each other: If memory
    // consumption were not an issue, we could just let the main thread produce
    // LLVM WorkItems at full speed, assuring maximal utilization of
    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
    // WorkItem potentially holds on to a substantial amount of memory.
    //
    // So the actual goal is to always produce just enough LLVM WorkItems as
    // not to starve our LLVM worker threads. That means, once we have enough
    // WorkItems in our queue, we can block the main thread, so it does not
    // produce more until we need them.
    //
    // Doing LLVM Work on the Main Thread
    // ----------------------------------
    // Since the main thread owns the compiler process's implicit `Token`, it is
    // wasteful to keep it blocked without doing any work. Therefore, what we do
    // in this case is: We spawn off an additional LLVM worker thread that helps
    // reduce the queue. The work it is doing corresponds to the implicit
    // `Token`. The coordinator will mark the main thread as being busy with
    // LLVM work. (The actual work happens on another OS thread but we just care
    // about `Tokens`, not actual threads).
    //
    // When any LLVM worker thread finishes while the main thread is marked as
    // "busy with LLVM work", we can do a little switcheroo: We give the Token
    // of the just finished thread to the LLVM worker thread that is working on
    // behalf of the main thread's implicit Token, thus freeing up the main
    // thread again. The coordinator can then again decide what the main thread
    // should do. This allows the coordinator to make decisions at more points
    // in time.
    //
    // Striking a Balance between Throughput and Memory Consumption
    // ------------------------------------------------------------
    // Since our two goals, (1) use as many Tokens as possible and (2) keep
    // memory consumption as low as possible, are in conflict with each other,
    // we have to find a trade-off between them. Right now, the goal is to keep
    // all workers busy, which means that no worker should find the queue empty
    // when it is ready to start.
    // How do we achieve this? Good question :) We actually never know how
    // many `Tokens` are potentially available so it's hard to say how much to
    // fill up the queue before switching the main thread to LLVM work. Also we
    // currently don't have a means to estimate how long a running LLVM worker
    // will still be busy with its current WorkItem. However, we know the
    // maximal count of available Tokens that makes sense (= the number of CPU
    // cores), so we can take a conservative guess. The heuristic we use here
    // is implemented in the `queue_full_enough()` function.
    //
    // Some Background on Jobservers
    // -----------------------------
    // It's worth also touching on the management of parallelism here. We don't
    // want to just spawn a thread per work item because while that's optimal
    // parallelism it may overload a system with too many threads or violate our
    // configuration for the maximum amount of CPU to use for this process. To
    // manage this we use the `jobserver` crate.
    //
    // Job servers are an artifact of GNU make and are used to manage
    // parallelism between processes. A jobserver is basically a glorified IPC
    // semaphore. Whenever we want to run some work we acquire the semaphore,
    // and whenever we're done with that work we release the semaphore. In this
    // manner we can ensure that the maximum number of parallel workers is
    // capped at any one point in time.
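    //
    // As a minimal illustrative sketch (not code from this module; the limit
    // of 4 is made up), the acquire/release pattern with the `jobserver`
    // crate boils down to:
    //
    //     let client = jobserver::Client::new(4).expect("failed to create jobserver");
    //     let token = client.acquire().expect("failed to acquire token");
    //     // ... do one unit of work while holding `token` ...
    //     drop(token); // dropping the token releases the semaphore slot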
    //
    // LTO and the coordinator thread
    // ------------------------------
    //
    // The final job the coordinator thread is responsible for is managing LTO
    // and how that works. When LTO is requested what we'll do is collect all
    // optimized LLVM modules into a local vector on the coordinator. Once all
    // modules have been codegened and optimized we hand this to the `lto`
    // module for further optimization. The `lto` module will return back a list
    // of more modules to work on, which the coordinator will continue to spawn
    // work for.
    //
    // Each LLVM module is automatically sent back to the coordinator for LTO if
    // necessary. There are already optimizations in place to avoid sending work
    // back to the coordinator if LTO isn't requested.
    return thread::spawn(move || {
        let max_workers = ::num_cpus::get();
        let mut worker_id_counter = 0;
        let mut free_worker_ids = Vec::new();
        let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
            if let Some(id) = free_worker_ids.pop() {
                id
            } else {
                let id = worker_id_counter;
                worker_id_counter += 1;
                id
            }
        };

        // This is where we collect codegen units that have gone all the way
        // through codegen and LLVM.
        let mut compiled_modules = vec![];
        let mut compiled_metadata_module = None;
        let mut compiled_allocator_module = None;
        let mut needs_fat_lto = Vec::new();
        let mut needs_thin_lto = Vec::new();
        let mut lto_import_only_modules = Vec::new();
        let mut started_lto = false;
        let mut codegen_aborted = false;

        // This flag tracks whether all items have gone through codegen.
        let mut codegen_done = false;

        // This is the queue of LLVM work items that still need processing.
        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();

        // These are the Jobserver Tokens we currently hold. Does not include
        // the implicit Token the compiler process owns no matter what.
        let mut tokens = Vec::new();

        let mut main_thread_worker_state = MainThreadWorkerState::Idle;
        let mut running = 0;

        let prof = &cgcx.prof;
        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;

        // Run the message loop while there's still anything that needs message
        // processing. Note that as soon as codegen is aborted we simply want to
        // wait for all existing work to finish, so many of the conditions here
        // only apply if codegen hasn't been aborted as they represent pending
        // work.
        while !codegen_done
            || running > 0
            || (!codegen_aborted
                && (work_items.len() > 0
                    || needs_fat_lto.len() > 0
                    || needs_thin_lto.len() > 0
                    || lto_import_only_modules.len() > 0
                    || main_thread_worker_state != MainThreadWorkerState::Idle))
        {
            // While there are still CGUs to be codegened, the coordinator has
            // to decide how to utilize the compiler process's implicit Token:
            // For codegenning more CGUs or for running them through LLVM.
            if !codegen_done {
                if main_thread_worker_state == MainThreadWorkerState::Idle {
                    if !queue_full_enough(work_items.len(), running, max_workers) {
                        // The queue is not full enough, codegen more items:
                        if let Err(_) = codegen_worker_send.send(Message::CodegenItem) {
                            panic!("Could not send Message::CodegenItem to main thread")
                        }
                        main_thread_worker_state = MainThreadWorkerState::Codegenning;
                    } else {
                        // The queue is full enough to not let the worker
                        // threads starve. Use the implicit Token to do some
                        // LLVM work too.
                        let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                        let cgcx = CodegenContext {
                            worker: get_worker_id(&mut free_worker_ids),
                            ..cgcx.clone()
                        };
                        maybe_start_llvm_timer(
                            prof,
                            cgcx.config(item.module_kind()),
                            &mut llvm_start_time,
                        );
                        main_thread_worker_state = MainThreadWorkerState::LLVMing;
                        spawn_work(cgcx, item);
                    }
                }
            } else if codegen_aborted {
                // don't queue up any more work if codegen was aborted, we're
                // just waiting for our existing children to finish
            } else {
                // If we've finished everything related to normal codegen
                // then it must be the case that we've got some LTO work to do.
                // Perform the serial work here of figuring out what we're
                // going to LTO and then push a bunch of work items onto our
                // queue to do LTO.
                if work_items.len() == 0
                    && running == 0
                    && main_thread_worker_state == MainThreadWorkerState::Idle
                {
                    assert!(!started_lto);
                    started_lto = true;

                    let needs_fat_lto = mem::take(&mut needs_fat_lto);
                    let needs_thin_lto = mem::take(&mut needs_thin_lto);
                    let import_only_modules = mem::take(&mut lto_import_only_modules);

                    for (work, cost) in
                        generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
                    {
                        let insertion_index = work_items
                            .binary_search_by_key(&cost, |&(_, cost)| cost)
                            .unwrap_or_else(|e| e);
                        work_items.insert(insertion_index, (work, cost));
                        if !cgcx.opts.debugging_opts.no_parallel_llvm {
                            helper.request_token();
                        }
                    }
                }

                // In this branch, we know that everything has been codegened,
                // so it's just a matter of determining whether the implicit
                // Token is free to use for LLVM work.
                match main_thread_worker_state {
                    MainThreadWorkerState::Idle => {
                        if let Some((item, _)) = work_items.pop() {
                            let cgcx = CodegenContext {
                                worker: get_worker_id(&mut free_worker_ids),
                                ..cgcx.clone()
                            };
                            maybe_start_llvm_timer(
                                prof,
                                cgcx.config(item.module_kind()),
                                &mut llvm_start_time,
                            );
                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
                            spawn_work(cgcx, item);
                        } else {
                            // There is no unstarted work, so let the main thread
                            // take over for a running worker. Otherwise the
                            // implicit token would just go to waste.
                            // We reduce the `running` counter by one. The
                            // `tokens.truncate()` below will take care of
                            // giving the Token back.
                            debug_assert!(running > 0);
                            running -= 1;
                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
                        }
                    }
                    MainThreadWorkerState::Codegenning => bug!(
                        "codegen worker should not be codegenning after \
                         codegen was already completed"
                    ),
                    MainThreadWorkerState::LLVMing => {
                        // Already making good use of that token
                    }
                }
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            while !codegen_aborted && work_items.len() > 0 && running < tokens.len() {
                let (item, _) = work_items.pop().unwrap();

                maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);

                let cgcx =
                    CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };

                spawn_work(cgcx, item);
                running += 1;
            }

            // Relinquish accidentally acquired extra tokens.
            tokens.truncate(running);

            // If a thread exits successfully then we drop a token associated
            // with that worker and update our `running` count. We may later
            // re-acquire a token to continue running more work. We may also not
            // actually drop a token here if the worker was running with an
            // "ephemeral token".
            let mut free_worker = |worker_id| {
                if main_thread_worker_state == MainThreadWorkerState::LLVMing {
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                } else {
                    running -= 1;
                }

                free_worker_ids.push(worker_id);
            };

            let msg = coordinator_receive.recv().unwrap();
            match *msg.downcast::<Message<B>>().ok().unwrap() {
                // Save the token locally and the next turn of the loop will use
                // this to spawn a new unit of work, or it may get dropped
                // immediately if we have no more work to spawn.
                Message::Token(token) => {
                    match token {
                        Ok(token) => {
                            tokens.push(token);

                            if main_thread_worker_state == MainThreadWorkerState::LLVMing {
                                // If the main thread token is used for LLVM work
                                // at the moment, we turn that thread into a regular
                                // LLVM worker thread, so the main thread is free
                                // to react to codegen demand.
                                main_thread_worker_state = MainThreadWorkerState::Idle;
                                running += 1;
                            }
                        }
                        Err(e) => {
                            let msg = &format!("failed to acquire jobserver token: {}", e);
                            shared_emitter.fatal(msg);
                            // Exit the coordinator thread.
                            panic!("{}", msg)
                        }
                    }
                }

                Message::CodegenDone { llvm_work_item, cost } => {
                    // We keep the queue sorted by estimated processing cost,
                    // so that more expensive items are processed earlier. This
                    // is good for throughput as it gives the main thread more
                    // time to fill up the queue and it avoids scheduling
                    // expensive items to the end.
                    // Note, however, that this is not ideal for memory
                    // consumption, as LLVM module sizes are not evenly
                    // distributed.
                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
                    let insertion_index = match insertion_index {
                        Ok(idx) | Err(idx) => idx,
                    };
                    work_items.insert(insertion_index, (llvm_work_item, cost));
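                    // For example (illustrative costs): if the queue currently
                    // holds costs [10, 40, 90] and a new item with cost 50
                    // arrives, `binary_search_by_key` returns `Err(2)`, so the
                    // queue becomes [10, 40, 50, 90]; `pop()` then hands out
                    // the most expensive item (90) first.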

                    if !cgcx.opts.debugging_opts.no_parallel_llvm {
                        helper.request_token();
                    }
                    assert!(!codegen_aborted);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }

                Message::CodegenComplete => {
                    codegen_done = true;
                    assert!(!codegen_aborted);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }

                // If codegen is aborted that means translation was aborted due
                // to some normal-ish compiler error. In this situation we want
                // to exit as soon as possible, but we want to make sure all
                // existing work has finished. Flag codegen as being done, and
                // then conditions above will ensure no more work is spawned but
                // we'll keep executing this loop until `running` hits 0.
                Message::CodegenAborted => {
                    assert!(!codegen_aborted);
                    codegen_done = true;
                    codegen_aborted = true;
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                }
                Message::Done { result: Ok(compiled_module), worker_id } => {
                    free_worker(worker_id);
                    match compiled_module.kind {
                        ModuleKind::Regular => {
                            compiled_modules.push(compiled_module);
                        }
                        ModuleKind::Metadata => {
                            assert!(compiled_metadata_module.is_none());
                            compiled_metadata_module = Some(compiled_module);
                        }
                        ModuleKind::Allocator => {
                            assert!(compiled_allocator_module.is_none());
                            compiled_allocator_module = Some(compiled_module);
                        }
                    }
                }
                Message::NeedsFatLTO { result, worker_id } => {
                    assert!(!started_lto);
                    free_worker(worker_id);
                    needs_fat_lto.push(result);
                }
                Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
                    assert!(!started_lto);
                    free_worker(worker_id);
                    needs_thin_lto.push((name, thin_buffer));
                }
                Message::AddImportOnlyModule { module_data, work_product } => {
                    assert!(!started_lto);
                    assert!(!codegen_done);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    lto_import_only_modules.push((module_data, work_product));
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }
                // If the thread failed that means it panicked, so we abort immediately.
                Message::Done { result: Err(None), worker_id: _ } => {
                    bug!("worker thread panicked");
                }
                Message::Done { result: Err(Some(WorkerFatalError)), worker_id: _ } => {
                    return Err(());
                }
                Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
            }
        }

        // Drop to print timings.
        drop(llvm_start_time);

        // Regardless of what order these modules completed in, report them to
        // the backend in the same order every time to ensure that we're handing
        // out deterministic results.
        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(CompiledModules {
            modules: compiled_modules,
            metadata_module: compiled_metadata_module,
            allocator_module: compiled_allocator_module,
        })
    });
}

// A heuristic that determines if we have enough LLVM WorkItems in the
// queue so that the main thread can do LLVM work instead of codegen.
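//
// As a worked example (illustrative numbers): with `max_workers = 8` and
// `workers_running = 4`, the queue counts as "full enough" once
// `items_in_queue >= 8 - 4/2 = 6`.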
fn queue_full_enough(
    items_in_queue: usize,
    workers_running: usize,
    max_workers: usize,
) -> bool {
    // Tune me, plz.
    items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}

fn maybe_start_llvm_timer<'a>(
    prof: &'a SelfProfilerRef,
    config: &ModuleConfig,
    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
) {
    if config.time_module && llvm_start_time.is_none() {
        *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes"));
    }
}

pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;

/// `FatalError` is explicitly not `Send`.
#[must_use]
pub struct WorkerFatalError;

fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
    thread::spawn(move || {
        // Set up a destructor which will fire off a message that we're done as
        // we exit.
        struct Bomb<B: ExtraBackendMethods> {
            coordinator_send: Sender<Box<dyn Any + Send>>,
            result: Option<Result<WorkItemResult<B>, FatalError>>,
            worker_id: usize,
        }
        impl<B: ExtraBackendMethods> Drop for Bomb<B> {
            fn drop(&mut self) {
                let worker_id = self.worker_id;
                let msg = match self.result.take() {
                    Some(Ok(WorkItemResult::Compiled(m))) => {
                        Message::Done::<B> { result: Ok(m), worker_id }
                    }
                    Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
                        Message::NeedsFatLTO::<B> { result: m, worker_id }
                    }
                    Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
                        Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
                    }
                    Some(Err(FatalError)) => {
                        Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
                    }
                    None => Message::Done::<B> { result: Err(None), worker_id },
                };
                drop(self.coordinator_send.send(Box::new(msg)));
            }
        }

        let mut bomb = Bomb::<B> {
            coordinator_send: cgcx.coordinator_send.clone(),
            result: None,
            worker_id: cgcx.worker,
        };

        // Execute the work itself, and if it finishes successfully then flag
        // ourselves as a success as well.
        //
        // Note that we ignore any `FatalError` coming out of `execute_work_item`,
        // as a diagnostic was already sent off to the main thread - just
        // surface that there was an error in this worker.
        bomb.result = {
            let _prof_timer = cgcx.prof.generic_activity(work.profiling_event_id());
            Some(execute_work_item(&cgcx, work))
        };
    });
}

pub fn run_assembler<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    handler: &Handler,
    assembly: &Path,
    object: &Path,
) {
    let assembler = cgcx.assembler_cmd.as_ref().expect("cgcx.assembler_cmd is missing?");

    let pname = &assembler.name;
    let mut cmd = assembler.cmd.clone();
    cmd.arg("-c").arg("-o").arg(object).arg(assembly);
    debug!("{:?}", cmd);

    match cmd.output() {
        Ok(prog) => {
            if !prog.status.success() {
                let mut note = prog.stderr.clone();
                note.extend_from_slice(&prog.stdout);

                handler
                    .struct_err(&format!(
                        "linking with `{}` failed: {}",
                        pname.display(),
                        prog.status
                    ))
                    .note(&format!("{:?}", &cmd))
                    .note(str::from_utf8(&note[..]).unwrap())
                    .emit();
                handler.abort_if_errors();
            }
        }
        Err(e) => {
            handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e));
            handler.abort_if_errors();
        }
    }
}

enum SharedEmitterMessage {
    Diagnostic(Diagnostic),
    InlineAsmError(u32, String),
    AbortIfErrors,
    Fatal(String),
}

#[derive(Clone)]
pub struct SharedEmitter {
    sender: Sender<SharedEmitterMessage>,
}

pub struct SharedEmitterMain {
    receiver: Receiver<SharedEmitterMessage>,
}

impl SharedEmitter {
    pub fn new() -> (SharedEmitter, SharedEmitterMain) {
        let (sender, receiver) = channel();

        (SharedEmitter { sender }, SharedEmitterMain { receiver })
    }

    pub fn inline_asm_error(&self, cookie: u32, msg: String) {
        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg)));
    }

    pub fn fatal(&self, msg: &str) {
        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
    }
}

impl Emitter for SharedEmitter {
    fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
        drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
            msg: diag.message(),
            code: diag.code.clone(),
            lvl: diag.level,
        })));
        for child in &diag.children {
            drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
                msg: child.message(),
                code: None,
                lvl: child.level,
            })));
        }
        drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
    }
    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
        None
    }
}

impl SharedEmitterMain {
    pub fn check(&self, sess: &Session, blocking: bool) {
        loop {
            let message = if blocking {
                match self.receiver.recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            } else {
                match self.receiver.try_recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            };

            match message {
                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
                    let handler = sess.diagnostic();
                    let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
                    if let Some(code) = diag.code {
                        d.code(code);
                    }
                    handler.emit_diagnostic(&d);
                }
                Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => {
                    sess.span_err(ExpnId::from_u32(cookie).expn_data().call_site, &msg)
                }
                Ok(SharedEmitterMessage::AbortIfErrors) => {
                    sess.abort_if_errors();
                }
                Ok(SharedEmitterMessage::Fatal(msg)) => {
                    sess.fatal(&msg);
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}

pub struct OngoingCodegen<B: ExtraBackendMethods> {
    pub backend: B,
    pub crate_name: Symbol,
    pub crate_hash: Svh,
    pub metadata: EncodedMetadata,
    pub windows_subsystem: Option<String>,
    pub linker_info: LinkerInfo,
    pub crate_info: CrateInfo,
    pub coordinator_send: Sender<Box<dyn Any + Send>>,
    pub codegen_worker_receive: Receiver<Message<B>>,
    pub shared_emitter_main: SharedEmitterMain,
    pub future: thread::JoinHandle<Result<CompiledModules, ()>>,
    pub output_filenames: Arc<OutputFilenames>,
}

impl<B: ExtraBackendMethods> OngoingCodegen<B> {
    pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
        let _timer = sess.timer("finish_ongoing_codegen");

        self.shared_emitter_main.check(sess, true);
        let future = self.future;
        let compiled_modules = sess.time("join_worker_thread", || match future.join() {
            Ok(Ok(compiled_modules)) => compiled_modules,
            Ok(Err(())) => {
                sess.abort_if_errors();
                panic!("expected abort due to worker thread errors")
            }
            Err(_) => {
                bug!("panic during codegen/LLVM phase");
            }
        });

        sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());

        sess.abort_if_errors();

        let work_products =
            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

        // FIXME: time_llvm_passes support - does this use a global context or
        // something based on the module being codegened?
        if sess.codegen_units() == 1 && sess.time_llvm_passes() {
            self.backend.print_pass_timings()
        }

        (
            CodegenResults {
                crate_name: self.crate_name,
                crate_hash: self.crate_hash,
                metadata: self.metadata,
                windows_subsystem: self.windows_subsystem,
                linker_info: self.linker_info,
                crate_info: self.crate_info,

                modules: compiled_modules.modules,
                allocator_module: compiled_modules.allocator_module,
                metadata_module: compiled_modules.metadata_module,
            },
            work_products,
        )
    }

    pub fn submit_pre_codegened_module_to_llvm(
        &self,
        tcx: TyCtxt<'_>,
        module: ModuleCodegen<B::Module>,
    ) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);

        // These are generally cheap and won't throw off scheduling.
        let cost = 0;
        submit_codegened_module_to_llvm(&self.backend, &self.coordinator_send, module, cost);
    }

    pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);
        drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::<B>)));
    }

    /// Consumes this context indicating that codegen was entirely aborted, and
    /// we need to exit as quickly as possible.
    ///
    /// This method blocks the current thread until all worker threads have
    /// finished, and all worker threads should have exited or be real close to
    /// exiting at this point.
    pub fn codegen_aborted(self) {
        // Signal to the coordinator it should spawn no more work and start
        // shutdown.
        drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::<B>)));
        drop(self.future.join());
    }

    pub fn check_for_errors(&self, sess: &Session) {
        self.shared_emitter_main.check(sess, false);
    }

    pub fn wait_for_signal_to_codegen_item(&self) {
        match self.codegen_worker_receive.recv() {
            Ok(Message::CodegenItem) => {
                // Nothing to do.
            }
            Ok(_) => panic!("unexpected message"),
            Err(_) => {
                // One of the LLVM threads must have panicked, fall through so
                // error handling can be reached.
            }
        }
    }
}

pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: ModuleCodegen<B::Module>,
    cost: u64,
) {
    let llvm_work_item = WorkItem::Optimize(module);
    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
}

pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: CachedModuleCodegen,
) {
    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
}

pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tcx: TyCtxt<'_>,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: CachedModuleCodegen,
) {
    let filename = pre_lto_bitcode_filename(&module.name);
    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
    let file = fs::File::open(&bc_path)
        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));

    let mmap = unsafe {
        memmap::Mmap::map(&file).unwrap_or_else(|e| {
            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
        })
    };
    // Schedule the module to be loaded.
    drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
        module_data: SerializedModule::FromUncompressedFile(mmap),
        work_product: module.source,
    })));
}
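
/// For example (illustrative input): a module named `foo.cgu-0` maps to the
/// file name `foo.cgu-0.pre-lto.bc`, since `PRE_LTO_BC_EXT` is `"pre-lto.bc"`.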
pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
    format!("{}.{}", module_name, PRE_LTO_BC_EXT)
}

fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
    // This should never be true (because it's not supported). If it is true,
    // something is wrong with commandline arg validation.
    assert!(
        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
            && tcx.sess.target.target.options.is_like_msvc
            && tcx.sess.opts.cg.prefer_dynamic)
    );

    tcx.sess.target.target.options.is_like_msvc &&
        tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) &&
    // ThinLTO can't handle this workaround in all cases, so we don't
    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
    // dynamic linking when linker plugin LTO is enabled.
        !tcx.sess.opts.cg.linker_plugin_lto.enabled()
}