use super::command::Command;
use super::link::{self, get_linker, remove};
use super::linker::LinkerInfo;
use super::lto::{self, SerializedModule};
use super::symbol_export::ExportedSymbols;
use crate::{
    CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
    RLIB_BYTECODE_EXTENSION,
};
use crate::traits::*;

use jobserver::{Acquired, Client};
use rustc::dep_graph::{WorkProduct, WorkProductFileKind, WorkProductId};
use rustc::middle::cstore::EncodedMetadata;
use rustc::session::config::{
    self, Lto, OutputFilenames, OutputType, Passes, Sanitizer, SwitchWithOptPath,
};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::profiling::VerboseTimingGuard;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
use rustc_errors::emitter::Emitter;
use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
    copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_session::cgu_reuse_tracker::CguReuseTracker;
use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::spec::MergeFunctions;
use syntax::attr;

use std::any::Any;
use std::fs;
use std::io;
use std::mem;
use std::path::{Path, PathBuf};
use std::str;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread;

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
/// Module-specific configuration for `optimize_and_codegen`.
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the metadata module).
    pub opt_level: Option<config::OptLevel>,

    /// Some(level) to optimize binary size, or None to not affect program size.
    pub opt_size: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,

    pub sanitizer: Option<Sanitizer>,
    pub sanitizer_recover: Vec<Sanitizer>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_no_opt_bc: bool,
    pub emit_bc: bool,
    pub emit_bc_compressed: bool,
    pub emit_lto_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: bool,
    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub time_module: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub inline_threshold: Option<usize>,
    // Instead of creating an object file by doing LLVM codegen, just
    // make the object file bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    pub obj_is_bitcode: bool,
    pub no_integrated_as: bool,
    pub embed_bitcode: bool,
    pub embed_bitcode_marker: bool,
}
impl ModuleConfig {
    fn new(passes: Vec<String>) -> ModuleConfig {
        ModuleConfig {
            passes,
            opt_level: None,
            opt_size: None,

            pgo_gen: SwitchWithOptPath::Disabled,
            pgo_use: None,

            sanitizer: None,
            sanitizer_recover: Default::default(),
            sanitizer_memory_track_origins: 0,

            emit_no_opt_bc: false,
            emit_pre_lto_bc: false,
            emit_bc: false,
            emit_bc_compressed: false,
            emit_lto_bc: false,
            emit_ir: false,
            emit_asm: false,
            emit_obj: false,
            obj_is_bitcode: false,
            embed_bitcode: false,
            embed_bitcode_marker: false,
            no_integrated_as: false,

            verify_llvm_ir: false,
            no_prepopulate_passes: false,
            no_builtins: false,
            time_module: true,
            vectorize_loop: false,
            vectorize_slp: false,
            merge_functions: false,
            inline_threshold: None,
        }
    }
    fn set_flags(&mut self, sess: &Session, no_builtins: bool) {
        self.verify_llvm_ir = sess.verify_llvm_ir();
        self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
        self.no_builtins = no_builtins || sess.target.target.options.no_builtins;
        self.inline_threshold = sess.opts.cg.inline_threshold;
        self.obj_is_bitcode =
            sess.target.target.options.obj_is_bitcode || sess.opts.cg.linker_plugin_lto.enabled();
        let embed_bitcode =
            sess.target.target.options.embed_bitcode || sess.opts.debugging_opts.embed_bitcode;
        if embed_bitcode {
            match sess.opts.optimize {
                config::OptLevel::No | config::OptLevel::Less => {
                    self.embed_bitcode_marker = embed_bitcode;
                }
                _ => self.embed_bitcode = embed_bitcode,
            }
        }

        // Copy what clang does by turning on loop vectorization at O2 and
        // slp vectorization at O3. Otherwise configure other optimization aspects
        // of this pass manager builder.
        self.vectorize_loop = !sess.opts.cg.no_vectorize_loops
            && (sess.opts.optimize == config::OptLevel::Default
                || sess.opts.optimize == config::OptLevel::Aggressive);
        self.vectorize_slp =
            !sess.opts.cg.no_vectorize_slp && sess.opts.optimize == config::OptLevel::Aggressive;

        // Some targets (namely, NVPTX) interact badly with the MergeFunctions
        // pass. This is because MergeFunctions can generate new function calls
        // which may interfere with the target calling convention; e.g. for the
        // NVPTX target, PTX kernels should not call other PTX kernels.
        // MergeFunctions can also be configured to generate aliases instead,
        // but aliases are not supported by some backends (again, NVPTX).
        // Therefore, allow targets to opt out of the MergeFunctions pass,
        // but otherwise keep the pass enabled (at O2 and O3) since it can be
        // useful for reducing code size.
        self.merge_functions = match sess
            .opts
            .debugging_opts
            .merge_functions
            .unwrap_or(sess.target.target.options.merge_functions)
        {
            MergeFunctions::Disabled => false,
            MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                sess.opts.optimize == config::OptLevel::Default
                    || sess.opts.optimize == config::OptLevel::Aggressive
            }
        };
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc || self.obj_is_bitcode || self.emit_bc_compressed || self.embed_bitcode
    }
}
/// Assembler name and command used by codegen when no_integrated_as is enabled
pub struct AssemblerCommand {
    name: PathBuf,
    cmd: Command,
}

// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`.
pub struct TargetMachineFactory<B: WriteBackendMethods>(
    pub Arc<dyn Fn() -> Result<B::TargetMachine, String> + Send + Sync>,
);

impl<B: WriteBackendMethods> Clone for TargetMachineFactory<B> {
    fn clone(&self) -> Self {
        TargetMachineFactory(self.0.clone())
    }
}
/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
    // Resources needed when running LTO
    pub backend: B,
    pub prof: SelfProfilerRef,
    pub lto: Lto,
    pub no_landing_pads: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub exported_symbols: Option<Arc<ExportedSymbols>>,
    pub opts: Arc<config::Options>,
    pub crate_types: Vec<config::CrateType>,
    pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
    pub output_filenames: Arc<OutputFilenames>,
    pub regular_module_config: Arc<ModuleConfig>,
    pub metadata_module_config: Arc<ModuleConfig>,
    pub allocator_module_config: Arc<ModuleConfig>,
    pub tm_factory: TargetMachineFactory<B>,
    pub msvc_imps_needed: bool,
    pub target_pointer_width: String,
    pub target_arch: String,
    pub debuginfo: config::DebugInfo,

    // Number of cgus excluding the allocator/metadata modules
    pub total_cgus: usize,
    // Handler to use for diagnostics produced during codegen.
    pub diag_emitter: SharedEmitter,
    // LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    // Worker thread number
    pub worker: usize,
    // The incremental compilation session directory, or None if we are not
    // compiling incrementally
    pub incr_comp_session_dir: Option<PathBuf>,
    // Used to update CGU re-use information during the thinlto phase.
    pub cgu_reuse_tracker: CguReuseTracker,
    // Channel back to the main control thread to send messages to
    pub coordinator_send: Sender<Box<dyn Any + Send>>,
    // The assembler command if no_integrated_as option is enabled, None otherwise
    pub assembler_cmd: Option<Arc<AssemblerCommand>>,
}
impl<B: WriteBackendMethods> CodegenContext<B> {
    pub fn create_diag_handler(&self) -> Handler {
        Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
    }

    pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
        match kind {
            ModuleKind::Regular => &self.regular_module_config,
            ModuleKind::Metadata => &self.metadata_module_config,
            ModuleKind::Allocator => &self.allocator_module_config,
        }
    }
}
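
/// Runs the requested fat or thin LTO over the modules collected by the
/// coordinator and converts the results back into a list of `WorkItem`s, each
/// paired with the estimated processing cost used for scheduling.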
fn generate_lto_work<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    needs_fat_lto: Vec<FatLTOInput<B>>,
    needs_thin_lto: Vec<(String, B::ThinBuffer)>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
    let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");

    let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
        assert!(needs_thin_lto.is_empty());
        let lto_module =
            B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
        (vec![lto_module], vec![])
    } else {
        assert!(needs_fat_lto.is_empty());
        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
    };

    let result = lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (WorkItem::LTO(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0,
            )
        }))
        .collect();

    result
}
pub struct CompiledModules {
    pub modules: Vec<CompiledModule>,
    pub metadata_module: Option<CompiledModule>,
    pub allocator_module: Option<CompiledModule>,
}
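
/// Whether the crate's bitcode must be embedded in any produced rlib so that
/// downstream crates can later run LTO against this crate.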
fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
    sess.crate_types.borrow().contains(&config::CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}
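
/// Sets up the per-module output configuration and spawns the coordinator
/// thread; codegen of individual CGUs is then driven through the returned
/// `OngoingCodegen` handle.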
pub fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    metadata: EncodedMetadata,
    total_cgus: usize,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();
    let sess = tcx.sess;

    let crate_name = tcx.crate_name(LOCAL_CRATE);
    let crate_hash = tcx.crate_hash(LOCAL_CRATE);
    let no_builtins = attr::contains_name(&tcx.hir().krate().attrs, sym::no_builtins);
    let subsystem =
        attr::first_attr_value_str_by_name(&tcx.hir().krate().attrs, sym::windows_subsystem);
    let windows_subsystem = subsystem.map(|subsystem| {
        if subsystem != sym::windows && subsystem != sym::console {
            tcx.sess.fatal(&format!(
                "invalid windows subsystem `{}`, only \
                 `windows` and `console` are allowed",
                subsystem
            ));
        }
        subsystem.to_string()
    });

    let linker_info = LinkerInfo::new(tcx);
    let crate_info = CrateInfo::new(tcx);

    // Figure out what we actually need to build.
    let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone());
    let mut metadata_config = ModuleConfig::new(vec![]);
    let mut allocator_config = ModuleConfig::new(vec![]);

    if sess.opts.debugging_opts.profile {
        modules_config.passes.push("insert-gcov-profiling".to_owned())
    }

    modules_config.pgo_gen = sess.opts.cg.profile_generate.clone();
    modules_config.pgo_use = sess.opts.cg.profile_use.clone();
    modules_config.sanitizer = sess.opts.debugging_opts.sanitizer.clone();
    modules_config.sanitizer_recover = sess.opts.debugging_opts.sanitizer_recover.clone();
    modules_config.sanitizer_memory_track_origins =
        sess.opts.debugging_opts.sanitizer_memory_track_origins;
    modules_config.opt_level = Some(sess.opts.optimize);
    modules_config.opt_size = Some(sess.opts.optimize);

    // Save all versions of the bytecode if we're saving our temporaries.
    if sess.opts.cg.save_temps {
        modules_config.emit_no_opt_bc = true;
        modules_config.emit_pre_lto_bc = true;
        modules_config.emit_bc = true;
        modules_config.emit_lto_bc = true;
        metadata_config.emit_bc = true;
        allocator_config.emit_bc = true;
    }

    // Emit compressed bitcode files for the crate if we're emitting an rlib.
    // Whenever an rlib is created, the bitcode is inserted into the archive in
    // order to allow LTO against it.
    if need_crate_bitcode_for_rlib(sess) {
        modules_config.emit_bc_compressed = true;
        allocator_config.emit_bc_compressed = true;
    }

    modules_config.emit_pre_lto_bc = need_pre_lto_bitcode_for_incr_comp(sess);

    modules_config.no_integrated_as =
        tcx.sess.opts.cg.no_integrated_as || tcx.sess.target.target.options.no_integrated_as;

    for output_type in sess.opts.output_types.keys() {
        match *output_type {
            OutputType::Bitcode => {
                modules_config.emit_bc = true;
            }
            OutputType::LlvmAssembly => {
                modules_config.emit_ir = true;
            }
            OutputType::Assembly => {
                modules_config.emit_asm = true;
                // If we're not using the LLVM assembler, this function
                // could be invoked specially with output_type_assembly, so
                // in this case we still want the metadata object file.
                if !sess.opts.output_types.contains_key(&OutputType::Assembly) {
                    metadata_config.emit_obj = true;
                    allocator_config.emit_obj = true;
                }
            }
            OutputType::Object => {
                modules_config.emit_obj = true;
            }
            OutputType::Metadata => {
                metadata_config.emit_obj = true;
            }
            OutputType::Exe => {
                modules_config.emit_obj = true;
                metadata_config.emit_obj = true;
                allocator_config.emit_obj = true;
            }
            OutputType::Mir => {}
            OutputType::DepInfo => {}
        }
    }

    modules_config.set_flags(sess, no_builtins);
    metadata_config.set_flags(sess, no_builtins);
    allocator_config.set_flags(sess, no_builtins);

    // Exclude metadata and allocator modules from time_passes output, since
    // they throw off the "LLVM passes" measurement.
    metadata_config.time_module = false;
    allocator_config.time_module = false;

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        &crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        total_cgus,
        sess.jobserver.clone(),
        Arc::new(modules_config),
        Arc::new(metadata_config),
        Arc::new(allocator_config),
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,
        crate_name,
        crate_hash,
        metadata,
        windows_subsystem,
        linker_info,
        crate_info,

        coordinator_send,
        codegen_worker_receive,
        shared_emitter_main,
        future: coordinator_thread,
        output_filenames: tcx.output_filenames(LOCAL_CRATE),
    }
}
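
/// Copies the object and bytecode files produced for each regular CGU into
/// the incremental compilation cache directory, registering them as work
/// products that future compilation sessions can re-use.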
fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxHashMap<WorkProductId, WorkProduct> {
    let mut work_products = FxHashMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("incr_comp_copy_cgu_workproducts");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = vec![];

        if let Some(ref path) = module.object {
            files.push((WorkProductFileKind::Object, path.clone()));
        }
        if let Some(ref path) = module.bytecode {
            files.push((WorkProductFileKind::Bytecode, path.clone()));
        }
        if let Some(ref path) = module.bytecode_compressed {
            files.push((WorkProductFileKind::BytecodeCompressed, path.clone()));
        }

        if let Some((id, product)) =
            copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files)
        {
            work_products.insert(id, product);
        }
    }

    work_products
}
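
/// Copies the temporary per-CGU outputs to their final, user-visible
/// locations and removes intermediate files that are no longer needed.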
fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &Path| {
        if let Err(e) = fs::copy(from, to) {
            sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
        }
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if compiled_modules.modules.len() == 1 {
            // 1) Only one codegen unit. In this case it's no difficulty
            //    to copy `foo.0.x` to `foo.x`.
            let module_name = Some(&compiled_modules.modules[0].name[..]);
            let path = crate_output.temp_path(output_type, module_name);
            copy_gracefully(&path, &crate_output.path(output_type));
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                remove(sess, &path);
            }
        } else {
            let ext = crate_output
                .temp_path(output_type, None)
                .extension()
                .unwrap()
                .to_str()
                .unwrap()
                .to_owned();

            if crate_output.outputs.contains_key(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.warn(&format!(
                    "ignoring emit path because multiple .{} files \
                     were produced",
                    ext
                ));
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.warn(&format!(
                    "ignoring -o because multiple .{} files \
                     were produced",
                    ext
                ));
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                // (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.bc
    //  - #crate#.#module-name#.o
    //  - #crate#.crate.metadata.bc
    //  - #crate#.crate.metadata.o
    //  - #crate#.o (linked from crate.##.o)
    //  - #crate#.bc (copied from crate.##.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);

        for module in compiled_modules.modules.iter() {
            if let Some(ref path) = module.object {
                if !keep_numbered_objects {
                    remove(sess, path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    remove(sess, path);
                }
            }
        }

        if !user_wants_bitcode {
            if let Some(ref metadata_module) = compiled_modules.metadata_module {
                if let Some(ref path) = metadata_module.bytecode {
                    remove(sess, path);
                }
            }

            if let Some(ref allocator_module) = compiled_modules.allocator_module {
                if let Some(ref path) = allocator_module.bytecode {
                    remove(sess, path);
                }
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.crate.metadata.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}
pub fn dump_incremental_data(_codegen_results: &CodegenResults) {
    // FIXME(mw): This does not work at the moment because the situation has
    //            become more complicated due to incremental LTO. Now a CGU
    //            can have more than two caching states.
    // println!("[incremental] Re-using {} out of {} modules",
    //           codegen_results.modules.iter().filter(|m| m.pre_existing).count(),
    //           codegen_results.modules.len());
}
pub enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs (Thin)LTO on the given module.
    LTO(lto::LtoModuleCodegen<B>),
}
impl<B: WriteBackendMethods> WorkItem<B> {
    pub fn module_kind(&self) -> ModuleKind {
        match *self {
            WorkItem::Optimize(ref m) => m.kind,
            WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
        }
    }

    fn profiling_event_id(&self) -> &'static str {
        match *self {
            WorkItem::Optimize(_) => "codegen_module_optimize",
            WorkItem::CopyPostLtoArtifacts(_) => "codegen_copy_artifacts_from_incr_cache",
            WorkItem::LTO(_) => "codegen_module_perform_lto",
        }
    }
}
enum WorkItemResult<B: WriteBackendMethods> {
    Compiled(CompiledModule),
    NeedsFatLTO(FatLTOInput<B>),
    NeedsThinLTO(String, B::ThinBuffer),
}

pub enum FatLTOInput<B: WriteBackendMethods> {
    Serialized { name: String, buffer: B::ModuleBuffer },
    InMemory(ModuleCodegen<B::Module>),
}
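
/// Dispatches a `WorkItem` to the worker routine matching its kind.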
fn execute_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    work_item: WorkItem<B>,
) -> Result<WorkItemResult<B>, FatalError> {
    let module_config = cgcx.config(work_item.module_kind());

    match work_item {
        WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
        WorkItem::CopyPostLtoArtifacts(module) => {
            execute_copy_from_cache_work_item(cgcx, module, module_config)
        }
        WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
    }
}
// Actual LTO type we end up choosing based on multiple factors.
enum ComputedLtoType {
    No,
    Thin,
    Fat,
}
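
/// Optimizes a freshly codegened module and decides whether it can be
/// compiled to object code right away or has to go back to the coordinator
/// for (Thin)LTO processing first.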
fn execute_optimize_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: ModuleCodegen<B::Module>,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    unsafe {
        B::optimize(cgcx, &diag_handler, &module, module_config)?;
    }

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).
    //
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, so as not to break the
    // assumption that the output will be a single module.
    let linker_does_lto = cgcx.opts.cg.linker_plugin_lto.enabled();

    // When we're automatically doing ThinLTO for multi-codegen-unit
    // builds we don't actually want to LTO the allocator modules if
    // it shows up. This is due to various linker shenanigans that
    // we'll encounter later.
    let is_allocator = module.kind == ModuleKind::Allocator;

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process,
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = cgcx.crate_types.len() == 1 && cgcx.crate_types[0] == config::CrateType::Rlib;

    // Metadata modules never participate in LTO regardless of the lto
    // settings.
    let lto_type = if module.kind == ModuleKind::Metadata {
        ComputedLtoType::No
    } else {
        match cgcx.lto {
            Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
            Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
            Lto::Fat if !is_rlib => ComputedLtoType::Fat,
            _ => ComputedLtoType::No,
        }
    };

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    Ok(match lto_type {
        ComputedLtoType::No => {
            let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
            WorkItemResult::Compiled(module)
        }
        ComputedLtoType::Thin => {
            let (name, thin_buffer) = B::prepare_thin(module);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLTO(name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let (name, buffer) = B::serialize_module(module);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer })
            }
            None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)),
        },
    })
}
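
/// Re-uses a module from the incremental cache: the saved object and bitcode
/// files are hard-linked (or copied) into the current output directory
/// instead of being re-optimized and re-codegened.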
fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    module: CachedModuleCodegen,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
    let mut object = None;
    let mut bytecode = None;
    let mut bytecode_compressed = None;
    for (kind, saved_file) in &module.source.saved_files {
        let obj_out = match kind {
            WorkProductFileKind::Object => {
                let path = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
                object = Some(path.clone());
                path
            }
            WorkProductFileKind::Bytecode => {
                let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(&module.name));
                bytecode = Some(path.clone());
                path
            }
            WorkProductFileKind::BytecodeCompressed => {
                let path = cgcx
                    .output_filenames
                    .temp_path(OutputType::Bitcode, Some(&module.name))
                    .with_extension(RLIB_BYTECODE_EXTENSION);
                bytecode_compressed = Some(path.clone());
                path
            }
        };
        let source_file = in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
        debug!(
            "copying pre-existing module `{}` from {:?} to {}",
            module.name,
            source_file,
            obj_out.display()
        );
        if let Err(err) = link_or_copy(&source_file, &obj_out) {
            let diag_handler = cgcx.create_diag_handler();
            diag_handler.err(&format!(
                "unable to copy {} to {}: {}",
                source_file.display(),
                obj_out.display(),
                err
            ));
        }
    }

    assert_eq!(object.is_some(), module_config.emit_obj);
    assert_eq!(bytecode.is_some(), module_config.emit_bc);
    assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed);

    Ok(WorkItemResult::Compiled(CompiledModule {
        name: module.name,
        kind: ModuleKind::Regular,
        object,
        bytecode,
        bytecode_compressed,
    }))
}
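
/// Runs LTO over an `LtoModuleCodegen` and compiles the optimized module down
/// to a `CompiledModule`.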
fn execute_lto_work_item<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    mut module: lto::LtoModuleCodegen<B>,
    module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    unsafe {
        let module = module.optimize(cgcx)?;
        let module = B::codegen(cgcx, &diag_handler, module, module_config)?;
        Ok(WorkItemResult::Compiled(module))
    }
}
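
/// Messages flowing through the coordinator's channel: jobserver tokens,
/// finished or rescheduled work from LLVM workers, and codegen progress
/// notifications from the main thread.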
pub enum Message<B: WriteBackendMethods> {
    Token(io::Result<Acquired>),
    NeedsFatLTO {
        result: FatLTOInput<B>,
        worker_id: usize,
    },
    NeedsThinLTO {
        name: String,
        thin_buffer: B::ThinBuffer,
        worker_id: usize,
    },
    Done {
        result: Result<CompiledModule, Option<WorkerFatalError>>,
        worker_id: usize,
    },
    CodegenDone {
        llvm_work_item: WorkItem<B>,
        cost: u64,
    },
    AddImportOnlyModule {
        module_data: SerializedModule<B::ModuleBuffer>,
        work_product: WorkProduct,
    },
    CodegenComplete,
    CodegenItem,
    CodegenAborted,
}

struct Diagnostic {
    msg: String,
    code: Option<DiagnosticId>,
    lvl: Level,
}
#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadWorkerState {
    Idle,
    Codegenning,
    LLVMing,
}
fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<Message<B>>,
    coordinator_receive: Receiver<Box<dyn Any + Send>>,
    total_cgus: usize,
    jobserver: Client,
    modules_config: Arc<ModuleConfig>,
    metadata_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
) -> thread::JoinHandle<Result<CompiledModules, ()>> {
    let coordinator_send = tx_to_llvm_workers;
    let sess = tcx.sess;

    // Compute the set of symbols we need to retain when doing LTO (if we need to)
    let exported_symbols = {
        let mut exported_symbols = FxHashMap::default();

        let copy_symbols = |cnum| {
            let symbols = tcx
                .exported_symbols(cnum)
                .iter()
                .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl))
                .collect();
            Arc::new(symbols)
        };

        match sess.lto() {
            Lto::No => None,
            Lto::ThinLocal => {
                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
                Some(Arc::new(exported_symbols))
            }
            Lto::Fat | Lto::Thin => {
                exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
                for &cnum in tcx.crates().iter() {
                    exported_symbols.insert(cnum, copy_symbols(cnum));
                }
                Some(Arc::new(exported_symbols))
            }
        }
    };

    // First up, convert our jobserver into a helper thread so we can use normal
    // mpsc channels to manage our messages and such.
    // After we've requested tokens then we'll, when we can,
    // get tokens on `coordinator_receive` which will
    // get managed in the main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
        })
        .expect("failed to spawn helper thread");

    let mut each_linked_rlib_for_lto = Vec::new();
    drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
        if link::ignored_for_lto(sess, crate_info, cnum) {
            return;
        }
        each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
    }));

    let assembler_cmd = if modules_config.no_integrated_as {
        // HACK: currently we use linker (gcc) as our assembler
        let (linker, flavor) = link::linker_and_flavor(sess);

        let (name, mut cmd) = get_linker(sess, &linker, flavor);
        cmd.args(&sess.target.target.options.asm_args);
        Some(Arc::new(AssemblerCommand { name, cmd }))
    } else {
        None
    };

    let ol = if tcx.sess.opts.debugging_opts.no_codegen
        || !tcx.sess.opts.output_types.should_codegen()
    {
        // If we know that we won’t be doing codegen, create target machines without optimisation.
        config::OptLevel::No
    } else {
        tcx.backend_optimization_level(LOCAL_CRATE)
    };

    let cgcx = CodegenContext::<B> {
        backend: backend.clone(),
        crate_types: sess.crate_types.borrow().clone(),
        each_linked_rlib_for_lto,
        lto: sess.lto(),
        no_landing_pads: sess.no_landing_pads(),
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        opts: Arc::new(sess.opts.clone()),
        prof: sess.prof.clone(),
        exported_symbols,
        remark: sess.opts.cg.remark.clone(),
        worker: 0,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
        coordinator_send,
        diag_emitter: shared_emitter.clone(),
        output_filenames: tcx.output_filenames(LOCAL_CRATE),
        regular_module_config: modules_config,
        metadata_module_config: metadata_config,
        allocator_module_config: allocator_config,
        tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol, false)),
        total_cgus,
        msvc_imps_needed: msvc_imps_needed(tcx),
        target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
        target_arch: tcx.sess.target.target.arch.clone(),
        debuginfo: tcx.sess.opts.debuginfo,
        assembler_cmd,
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver main thread (`SharedEmitterMain`). The
    // main thread will periodically query this error message queue and emit
    // any error messages it has received. It might even abort compilation if
    // it has received a fatal error. In this case we rely on all other threads
    // being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
    // that WorkItem. When an LLVM worker thread is done with its WorkItem,
    // it will just shut down, which also frees all resources associated with
    // the given LLVM module, and sends a message to the coordinator that the
    // WorkItem has been completed.
    //
    // Work Scheduling
    // ===============
    // The scheduler's goal is to minimize the time it takes to complete all
    // the work there is; however, we also want to keep memory consumption low
    // if possible. These two goals are at odds with each other: If memory
    // consumption were not an issue, we could just let the main thread produce
    // LLVM WorkItems at full speed, assuring maximal utilization of
    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
    // WorkItem potentially holds on to a substantial amount of memory.
    //
    // So the actual goal is to always produce just enough LLVM WorkItems so as
    // not to starve our LLVM worker threads. That means, once we have enough
    // WorkItems in our queue, we can block the main thread, so it does not
    // produce more until we need them.
    //
    // Doing LLVM Work on the Main Thread
    // ----------------------------------
    // Since the main thread owns the compiler process's implicit `Token`, it is
    // wasteful to keep it blocked without doing any work. Therefore, what we do
    // in this case is: We spawn off an additional LLVM worker thread that helps
    // reduce the queue. The work it is doing corresponds to the implicit
    // `Token`. The coordinator will mark the main thread as being busy with
    // LLVM work. (The actual work happens on another OS thread but we just care
    // about `Tokens`, not actual threads).
    //
    // When any LLVM worker thread finishes while the main thread is marked as
    // "busy with LLVM work", we can do a little switcheroo: We give the Token
    // of the just finished thread to the LLVM worker thread that is working on
    // behalf of the main thread's implicit Token, thus freeing up the main
    // thread again. The coordinator can then again decide what the main thread
    // should do. This allows the coordinator to make decisions at more points
    // in time.
    //
    // Striking a Balance between Throughput and Memory Consumption
    // ------------------------------------------------------------
    // Since our two goals, (1) use as many Tokens as possible and (2) keep
    // memory consumption as low as possible, are in conflict with each other,
    // we have to find a trade off between them. Right now, the goal is to keep
    // all workers busy, which means that no worker should find the queue empty
    // when it is ready to start.
    // How do we achieve this? Good question :) We actually never know how
    // many `Tokens` are potentially available so it's hard to say how much to
    // fill up the queue before switching the main thread to LLVM work. Also we
    // currently don't have a means to estimate how long a running LLVM worker
    // will still be busy with its current WorkItem. However, we know the
    // maximal count of available Tokens that makes sense (=the number of CPU
    // cores), so we can take a conservative guess. The heuristic we use here
    // is implemented in the `queue_full_enough()` function.
    //
    // Some Background on Jobservers
    // -----------------------------
    // It's worth also touching on the management of parallelism here. We don't
    // want to just spawn a thread per work item because while that's optimal
    // parallelism it may overload a system with too many threads or violate our
    // configuration for the maximum amount of CPU to use for this process. To
    // manage this we use the `jobserver` crate.
    //
    // Job servers are an artifact of GNU make and are used to manage
    // parallelism between processes. A jobserver is a glorified IPC semaphore
    // basically. Whenever we want to run some work we acquire the semaphore,
    // and whenever we're done with that work we release the semaphore. In this
    // manner we can ensure that the maximum number of parallel workers is
    // capped at any one point in time.
    //
    // LTO and the coordinator thread
    // ------------------------------
    //
    // The final job the coordinator thread is responsible for is managing LTO
    // and how that works. When LTO is requested what we'll do is collect all
    // optimized LLVM modules into a local vector on the coordinator. Once all
    // modules have been codegened and optimized we hand this to the `lto`
    // module for further optimization. The `lto` module will return back a list
    // of more modules to work on, which the coordinator will continue to spawn
    // work for.
    //
    // Each LLVM module is automatically sent back to the coordinator for LTO if
    // necessary. There's already optimizations in place to avoid sending work
    // back to the coordinator if LTO isn't requested.
    return thread::spawn(move || {
        let max_workers = ::num_cpus::get();
        let mut worker_id_counter = 0;
        let mut free_worker_ids = Vec::new();
        let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
            if let Some(id) = free_worker_ids.pop() {
                id
            } else {
                let id = worker_id_counter;
                worker_id_counter += 1;
                id
            }
        };

        // This is where we collect codegen units that have gone all the way
        // through codegen and LLVM.
        let mut compiled_modules = vec![];
        let mut compiled_metadata_module = None;
        let mut compiled_allocator_module = None;
        let mut needs_fat_lto = Vec::new();
        let mut needs_thin_lto = Vec::new();
        let mut lto_import_only_modules = Vec::new();
        let mut started_lto = false;
        let mut codegen_aborted = false;

        // This flag tracks whether all items have gone through codegen.
        let mut codegen_done = false;

        // This is the queue of LLVM work items that still need processing.
        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();

        // These are the Jobserver Tokens we currently hold. Does not include
        // the implicit Token the compiler process owns no matter what.
        let mut tokens = Vec::new();

        let mut main_thread_worker_state = MainThreadWorkerState::Idle;
        let mut running = 0;

        let prof = &cgcx.prof;
        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;

        // Run the message loop while there's still anything that needs message
        // processing. Note that as soon as codegen is aborted we simply want to
        // wait for all existing work to finish, so many of the conditions here
        // only apply if codegen hasn't been aborted as they represent pending
        // work to be done.
        while !codegen_done
            || running > 0
            || (!codegen_aborted
                && (work_items.len() > 0
                    || needs_fat_lto.len() > 0
                    || needs_thin_lto.len() > 0
                    || lto_import_only_modules.len() > 0
                    || main_thread_worker_state != MainThreadWorkerState::Idle))
        {
            // While there are still CGUs to be codegened, the coordinator has
            // to decide how to utilize the compiler process's implicit Token:
            // For codegenning more CGUs or for running them through LLVM.
            if !codegen_done {
                if main_thread_worker_state == MainThreadWorkerState::Idle {
                    if !queue_full_enough(work_items.len(), running, max_workers) {
                        // The queue is not full enough, codegen more items:
                        if let Err(_) = codegen_worker_send.send(Message::CodegenItem) {
                            panic!("Could not send Message::CodegenItem to main thread")
                        }
                        main_thread_worker_state = MainThreadWorkerState::Codegenning;
                    } else {
                        // The queue is full enough to not let the worker
                        // threads starve. Use the implicit Token to do some
                        // LLVM work too.
                        let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                        let cgcx = CodegenContext {
                            worker: get_worker_id(&mut free_worker_ids),
                            ..cgcx.clone()
                        };
                        maybe_start_llvm_timer(
                            prof,
                            cgcx.config(item.module_kind()),
                            &mut llvm_start_time,
                        );
                        main_thread_worker_state = MainThreadWorkerState::LLVMing;
                        spawn_work(cgcx, item);
                    }
                }
            } else if codegen_aborted {
                // don't queue up any more work if codegen was aborted, we're
                // just waiting for our existing children to finish
            } else {
                // If we've finished everything related to normal codegen
                // then it must be the case that we've got some LTO work to do.
                // Perform the serial work here of figuring out what we're
                // going to LTO and then push a bunch of work items onto our
                // queue to do LTO.
                if work_items.len() == 0
                    && running == 0
                    && main_thread_worker_state == MainThreadWorkerState::Idle
                {
                    assert!(!started_lto);
                    started_lto = true;

                    let needs_fat_lto = mem::take(&mut needs_fat_lto);
                    let needs_thin_lto = mem::take(&mut needs_thin_lto);
                    let import_only_modules = mem::take(&mut lto_import_only_modules);

                    for (work, cost) in
                        generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
                    {
                        let insertion_index = work_items
                            .binary_search_by_key(&cost, |&(_, cost)| cost)
                            .unwrap_or_else(|e| e);
                        work_items.insert(insertion_index, (work, cost));
                        if !cgcx.opts.debugging_opts.no_parallel_llvm {
                            helper.request_token();
                        }
                    }
                }
                // In this branch, we know that everything has been codegened,
                // so it's just a matter of determining whether the implicit
                // Token is free to use for LLVM work.
                match main_thread_worker_state {
                    MainThreadWorkerState::Idle => {
                        if let Some((item, _)) = work_items.pop() {
                            let cgcx = CodegenContext {
                                worker: get_worker_id(&mut free_worker_ids),
                                ..cgcx.clone()
                            };
                            maybe_start_llvm_timer(
                                prof,
                                cgcx.config(item.module_kind()),
                                &mut llvm_start_time,
                            );
                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
                            spawn_work(cgcx, item);
                        } else {
                            // There is no unstarted work, so let the main thread
                            // take over for a running worker. Otherwise the
                            // implicit token would just go to waste.
                            // We reduce the `running` counter by one. The
                            // `tokens.truncate()` below will take care of
                            // giving the Token back.
                            debug_assert!(running > 0);
                            running -= 1;
                            main_thread_worker_state = MainThreadWorkerState::LLVMing;
                        }
                    }
                    MainThreadWorkerState::Codegenning => bug!(
                        "codegen worker should not be codegenning after \
                         codegen was already completed"
                    ),
                    MainThreadWorkerState::LLVMing => {
                        // Already making good use of that token
                    }
                }
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            while !codegen_aborted && work_items.len() > 0 && running < tokens.len() {
                let (item, _) = work_items.pop().unwrap();

                maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);

                let cgcx =
                    CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };

                spawn_work(cgcx, item);
                running += 1;
            }

            // Relinquish accidentally acquired extra tokens
            tokens.truncate(running);

            // If a thread exits successfully then we drop a token associated
            // with that worker and update our `running` count. We may later
            // re-acquire a token to continue running more work. We may also not
            // actually drop a token here if the worker was running with an
            // "ephemeral token"
            let mut free_worker = |worker_id| {
                if main_thread_worker_state == MainThreadWorkerState::LLVMing {
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                } else {
                    running -= 1;
                }

                free_worker_ids.push(worker_id);
            };

            let msg = coordinator_receive.recv().unwrap();
            match *msg.downcast::<Message<B>>().ok().unwrap() {
                // Save the token locally and the next turn of the loop will use
                // this to spawn a new unit of work, or it may get dropped
                // immediately if we have no more work to spawn.
                Message::Token(token) => {
                    match token {
                        Ok(token) => {
                            tokens.push(token);

                            if main_thread_worker_state == MainThreadWorkerState::LLVMing {
                                // If the main thread token is used for LLVM work
                                // at the moment, we turn that thread into a regular
                                // LLVM worker thread, so the main thread is free
                                // to react to codegen demand.
                                main_thread_worker_state = MainThreadWorkerState::Idle;
                                running += 1;
                            }
                        }
                        Err(e) => {
                            let msg = &format!("failed to acquire jobserver token: {}", e);
                            shared_emitter.fatal(msg);
                            // Exit the coordinator thread
                            panic!("{}", msg)
                        }
                    }
                }

                Message::CodegenDone { llvm_work_item, cost } => {
                    // We keep the queue sorted by estimated processing cost,
                    // so that more expensive items are processed earlier. This
                    // is good for throughput as it gives the main thread more
                    // time to fill up the queue and it avoids scheduling
                    // expensive items to the end.
                    // Note, however, that this is not ideal for memory
                    // consumption, as LLVM module sizes are not evenly
                    // distributed.
                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
                    let insertion_index = match insertion_index {
                        Ok(idx) | Err(idx) => idx,
                    };
                    work_items.insert(insertion_index, (llvm_work_item, cost));

                    if !cgcx.opts.debugging_opts.no_parallel_llvm {
                        helper.request_token();
                    }
                    assert!(!codegen_aborted);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }

                Message::CodegenComplete => {
                    codegen_done = true;
                    assert!(!codegen_aborted);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }

                // If codegen is aborted that means translation was aborted due
                // to some normal-ish compiler error. In this situation we want
                // to exit as soon as possible, but we want to make sure all
                // existing work has finished. Flag codegen as being done, and
                // then conditions above will ensure no more work is spawned but
                // we'll keep executing this loop until `running` hits 0.
                Message::CodegenAborted => {
                    assert!(!codegen_aborted);
                    codegen_done = true;
                    codegen_aborted = true;
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                }
                Message::Done { result: Ok(compiled_module), worker_id } => {
                    free_worker(worker_id);
                    match compiled_module.kind {
                        ModuleKind::Regular => {
                            compiled_modules.push(compiled_module);
                        }
                        ModuleKind::Metadata => {
                            assert!(compiled_metadata_module.is_none());
                            compiled_metadata_module = Some(compiled_module);
                        }
                        ModuleKind::Allocator => {
                            assert!(compiled_allocator_module.is_none());
                            compiled_allocator_module = Some(compiled_module);
                        }
                    }
                }
                Message::NeedsFatLTO { result, worker_id } => {
                    assert!(!started_lto);
                    free_worker(worker_id);
                    needs_fat_lto.push(result);
                }
                Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
                    assert!(!started_lto);
                    free_worker(worker_id);
                    needs_thin_lto.push((name, thin_buffer));
                }
                Message::AddImportOnlyModule { module_data, work_product } => {
                    assert!(!started_lto);
                    assert!(!codegen_done);
                    assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
                    lto_import_only_modules.push((module_data, work_product));
                    main_thread_worker_state = MainThreadWorkerState::Idle;
                }
                // If the thread failed that means it panicked, so we abort immediately.
                Message::Done { result: Err(None), worker_id: _ } => {
                    bug!("worker thread panicked");
                }
                Message::Done { result: Err(Some(WorkerFatalError)), worker_id: _ } => {
                    // Like `CodegenAborted`, wait for all remaining work to finish.
                    codegen_done = true;
                    codegen_aborted = true;
                }
                Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
            }
        }

        // Drop to print timings
        drop(llvm_start_time);

        // Regardless of what order these modules completed in, report them to
        // the backend in the same order every time to ensure that we're handing
        // out deterministic results.
        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(CompiledModules {
            modules: compiled_modules,
            metadata_module: compiled_metadata_module,
            allocator_module: compiled_allocator_module,
        })
    });

    // A heuristic that determines if we have enough LLVM WorkItems in the
    // queue so that the main thread can do LLVM work instead of codegen
    fn queue_full_enough(
        items_in_queue: usize,
        workers_running: usize,
        max_workers: usize,
    ) -> bool {
        items_in_queue > 0 && items_in_queue >= max_workers.saturating_sub(workers_running / 2)
    }

    fn maybe_start_llvm_timer<'a>(
        prof: &'a SelfProfilerRef,
        config: &ModuleConfig,
        llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
    ) {
        if config.time_module && llvm_start_time.is_none() {
            *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes"));
        }
    }
}
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;

/// `FatalError` is explicitly not `Send`.
#[must_use]
pub struct WorkerFatalError;
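
/// Spawns an LLVM worker thread for a single `WorkItem`. The result is
/// reported back to the coordinator from a `Drop` implementation (the `Bomb`
/// below) so that a message is sent even if the worker panics.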
fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
    thread::spawn(move || {
        // Set up a destructor which will fire off a message that we're done as
        // we exit.
        struct Bomb<B: ExtraBackendMethods> {
            coordinator_send: Sender<Box<dyn Any + Send>>,
            result: Option<Result<WorkItemResult<B>, FatalError>>,
            worker_id: usize,
        }
        impl<B: ExtraBackendMethods> Drop for Bomb<B> {
            fn drop(&mut self) {
                let worker_id = self.worker_id;
                let msg = match self.result.take() {
                    Some(Ok(WorkItemResult::Compiled(m))) => {
                        Message::Done::<B> { result: Ok(m), worker_id }
                    }
                    Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
                        Message::NeedsFatLTO::<B> { result: m, worker_id }
                    }
                    Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
                        Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
                    }
                    Some(Err(FatalError)) => {
                        Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
                    }
                    None => Message::Done::<B> { result: Err(None), worker_id },
                };
                drop(self.coordinator_send.send(Box::new(msg)));
            }
        }

        let mut bomb = Bomb::<B> {
            coordinator_send: cgcx.coordinator_send.clone(),
            result: None,
            worker_id: cgcx.worker,
        };

        // Execute the work itself, and if it finishes successfully then flag
        // ourselves as a success as well.
        //
        // Note that we ignore any `FatalError` coming out of `execute_work_item`,
        // as a diagnostic was already sent off to the main thread - just
        // surface that there was an error in this worker.
        bomb.result = {
            let _prof_timer = cgcx.prof.generic_activity(work.profiling_event_id());
            Some(execute_work_item(&cgcx, work))
        };
    });
}
pub fn run_assembler<B: ExtraBackendMethods>(
    cgcx: &CodegenContext<B>,
    handler: &Handler,
    assembly: &Path,
    object: &Path,
) {
    let assembler = cgcx.assembler_cmd.as_ref().expect("cgcx.assembler_cmd is missing?");

    let pname = &assembler.name;
    let mut cmd = assembler.cmd.clone();
    cmd.arg("-c").arg("-o").arg(object).arg(assembly);
    debug!("{:?}", cmd);

    match cmd.output() {
        Ok(prog) => {
            if !prog.status.success() {
                let mut note = prog.stderr.clone();
                note.extend_from_slice(&prog.stdout);

                handler
                    .struct_err(&format!(
                        "linking with `{}` failed: {}",
                        pname.display(),
                        prog.status
                    ))
                    .note(&format!("{:?}", &cmd))
                    .note(str::from_utf8(&note[..]).unwrap())
                    .emit();
                handler.abort_if_errors();
            }
        }
        Err(e) => {
            handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e));
            handler.abort_if_errors();
        }
    }
}
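
/// A message sent from a codegen thread to the main thread, which is the only
/// place the `Session`'s diagnostic machinery may be used.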
enum SharedEmitterMessage {
    Diagnostic(Diagnostic),
    InlineAsmError(u32, String),
    AbortIfErrors,
    Fatal(String),
}

#[derive(Clone)]
pub struct SharedEmitter {
    sender: Sender<SharedEmitterMessage>,
}

pub struct SharedEmitterMain {
    receiver: Receiver<SharedEmitterMessage>,
}

impl SharedEmitter {
    pub fn new() -> (SharedEmitter, SharedEmitterMain) {
        let (sender, receiver) = channel();

        (SharedEmitter { sender }, SharedEmitterMain { receiver })
    }

    pub fn inline_asm_error(&self, cookie: u32, msg: String) {
        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg)));
    }

    pub fn fatal(&self, msg: &str) {
        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
    }
}

impl Emitter for SharedEmitter {
    fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
        drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
            msg: diag.message(),
            code: diag.code.clone(),
            lvl: diag.level,
        })));
        for child in &diag.children {
            drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
                msg: child.message(),
                code: None,
                lvl: child.level,
            })));
        }
        drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
    }
    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
        None
    }
}
impl SharedEmitterMain {
    pub fn check(&self, sess: &Session, blocking: bool) {
        loop {
            let message = if blocking {
                match self.receiver.recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            } else {
                match self.receiver.try_recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            };

            match message {
                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
                    let handler = sess.diagnostic();
                    let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
                    if let Some(code) = diag.code {
                        d.code(code);
                    }
                    handler.emit_diagnostic(&d);
                }
                Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => {
                    sess.span_err(ExpnId::from_u32(cookie).expn_data().call_site, &msg)
                }
                Ok(SharedEmitterMessage::AbortIfErrors) => {
                    sess.abort_if_errors();
                }
                Ok(SharedEmitterMessage::Fatal(msg)) => {
                    sess.fatal(&msg);
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}
pub struct OngoingCodegen<B: ExtraBackendMethods> {
    pub backend: B,
    pub crate_name: Symbol,
    pub crate_hash: Svh,
    pub metadata: EncodedMetadata,
    pub windows_subsystem: Option<String>,
    pub linker_info: LinkerInfo,
    pub crate_info: CrateInfo,
    pub coordinator_send: Sender<Box<dyn Any + Send>>,
    pub codegen_worker_receive: Receiver<Message<B>>,
    pub shared_emitter_main: SharedEmitterMain,
    pub future: thread::JoinHandle<Result<CompiledModules, ()>>,
    pub output_filenames: Arc<OutputFilenames>,
}
impl<B: ExtraBackendMethods> OngoingCodegen<B> {
    pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
        let _timer = sess.timer("finish_ongoing_codegen");

        self.shared_emitter_main.check(sess, true);
        let future = self.future;
        let compiled_modules = sess.time("join_worker_thread", || match future.join() {
            Ok(Ok(compiled_modules)) => compiled_modules,
            Ok(Err(())) => {
                sess.abort_if_errors();
                panic!("expected abort due to worker thread errors")
            }
            Err(_) => {
                bug!("panic during codegen/LLVM phase");
            }
        });

        sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());

        sess.abort_if_errors();

        let work_products =
            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

        // FIXME: time_llvm_passes support - does this use a global context or
        // something?
        if sess.codegen_units() == 1 && sess.time_llvm_passes() {
            self.backend.print_pass_timings()
        }

        (
            CodegenResults {
                crate_name: self.crate_name,
                crate_hash: self.crate_hash,
                metadata: self.metadata,
                windows_subsystem: self.windows_subsystem,
                linker_info: self.linker_info,
                crate_info: self.crate_info,

                modules: compiled_modules.modules,
                allocator_module: compiled_modules.allocator_module,
                metadata_module: compiled_modules.metadata_module,
            },
            work_products,
        )
    }
    pub fn submit_pre_codegened_module_to_llvm(
        &self,
        tcx: TyCtxt<'_>,
        module: ModuleCodegen<B::Module>,
    ) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);

        // These are generally cheap and won't throw off scheduling.
        let cost = 0;
        submit_codegened_module_to_llvm(&self.backend, &self.coordinator_send, module, cost);
    }

    pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);
        drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::<B>)));
    }

    /// Consumes this context indicating that codegen was entirely aborted, and
    /// we need to exit as quickly as possible.
    ///
    /// This method blocks the current thread until all worker threads have
    /// finished, and all worker threads should have exited or be real close to
    /// exiting at this point.
    pub fn codegen_aborted(self) {
        // Signal to the coordinator it should spawn no more work and start
        // shutdown.
        drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::<B>)));
        drop(self.future.join());
    }

    pub fn check_for_errors(&self, sess: &Session) {
        self.shared_emitter_main.check(sess, false);
    }

    pub fn wait_for_signal_to_codegen_item(&self) {
        match self.codegen_worker_receive.recv() {
            Ok(Message::CodegenItem) => {
                // Nothing to do
            }
            Ok(_) => panic!("unexpected message"),
            Err(_) => {
                // One of the LLVM threads must have panicked, fall through so
                // error handling can be reached.
            }
        }
    }
}
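
/// Queues a freshly codegened module for optimization on an LLVM worker; the
/// cost estimate steers the coordinator's scheduling.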
pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: ModuleCodegen<B::Module>,
    cost: u64,
) {
    let llvm_work_item = WorkItem::Optimize(module);
    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
}

pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: CachedModuleCodegen,
) {
    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
    drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
}

pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
    _backend: &B,
    tcx: TyCtxt<'_>,
    tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
    module: CachedModuleCodegen,
) {
    let filename = pre_lto_bitcode_filename(&module.name);
    let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
    let file = fs::File::open(&bc_path)
        .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));

    let mmap = unsafe {
        memmap::Mmap::map(&file).unwrap_or_else(|e| {
            panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
        })
    };
    // Schedule the module to be loaded
    drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
        module_data: SerializedModule::FromUncompressedFile(mmap),
        work_product: module.source,
    })));
}

pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
    format!("{}.{}", module_name, PRE_LTO_BC_EXT)
}
fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
    // This should never be true (because it's not supported). If it is true,
    // something is wrong with commandline arg validation.
    assert!(
        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
            && tcx.sess.target.target.options.is_like_msvc
            && tcx.sess.opts.cg.prefer_dynamic)
    );

    tcx.sess.target.target.options.is_like_msvc &&
        tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) &&
    // ThinLTO can't handle this workaround in all cases, so we don't
    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
    // dynamic linking when linker plugin LTO is enabled.
        !tcx.sess.opts.cg.linker_plugin_lto.enabled()
}