1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION};
use back::symbol_export;
use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext};
use back::write::{self, DiagnosticHandlers};
use errors::{FatalError, Handler};
use llvm::archive_ro::ArchiveRO;
use llvm::{self, True, False};
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::session::config::{self, Lto};
use rustc::util::common::time_ext;
use time_graph::Timeline;
use {ModuleCodegen, ModuleLlvm, ModuleKind, ModuleSource};

use libc;

use std::ffi::CString;
use std::ptr;
use std::slice;
use std::sync::Arc;
33 pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool {
35 config::CrateType::Executable |
36 config::CrateType::Staticlib |
37 config::CrateType::Cdylib => true,
39 config::CrateType::Dylib |
40 config::CrateType::Rlib |
41 config::CrateType::ProcMacro => false,
45 pub(crate) enum LtoModuleCodegen {
47 module: Option<ModuleCodegen>,
48 _serialized_bitcode: Vec<SerializedModule>,
54 impl LtoModuleCodegen {
55 pub fn name(&self) -> &str {
57 LtoModuleCodegen::Fat { .. } => "everything",
58 LtoModuleCodegen::Thin(ref m) => m.name(),
62 /// Optimize this module within the given codegen context.
64 /// This function is unsafe as it'll return a `ModuleCodegen` still
65 /// points to LLVM data structures owned by this `LtoModuleCodegen`.
66 /// It's intended that the module returned is immediately code generated and
67 /// dropped, and then this LTO module is dropped.
68 pub(crate) unsafe fn optimize(&mut self,
69 cgcx: &CodegenContext,
70 timeline: &mut Timeline)
71 -> Result<ModuleCodegen, FatalError>
74 LtoModuleCodegen::Fat { ref mut module, .. } => {
75 let module = module.take().unwrap();
77 let config = cgcx.config(module.kind);
78 let llmod = module.llvm().unwrap().llmod();
79 let tm = &*module.llvm().unwrap().tm;
80 run_pass_manager(cgcx, tm, llmod, config, false);
81 timeline.record("fat-done");
85 LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline),
89 /// A "gauge" of how costly it is to optimize this module, used to sort
90 /// biggest modules first.
91 pub fn cost(&self) -> u64 {
93 // Only one module with fat LTO, so the cost doesn't matter.
94 LtoModuleCodegen::Fat { .. } => 0,
95 LtoModuleCodegen::Thin(ref m) => m.cost(),
100 pub(crate) fn run(cgcx: &CodegenContext,
101 modules: Vec<ModuleCodegen>,
102 timeline: &mut Timeline)
103 -> Result<Vec<LtoModuleCodegen>, FatalError>
105 let diag_handler = cgcx.create_diag_handler();
106 let export_threshold = match cgcx.lto {
107 // We're just doing LTO for our one crate
108 Lto::ThinLocal => SymbolExportLevel::Rust,
110 // We're doing LTO for the entire crate graph
111 Lto::Yes | Lto::Fat | Lto::Thin => {
112 symbol_export::crates_export_threshold(&cgcx.crate_types)
115 Lto::No => panic!("didn't request LTO but we're doing LTO"),
118 let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| {
119 if level.is_below_threshold(export_threshold) {
120 let mut bytes = Vec::with_capacity(name.len() + 1);
121 bytes.extend(name.bytes());
122 Some(CString::new(bytes).unwrap())
127 let exported_symbols = cgcx.exported_symbols
128 .as_ref().expect("needs exported symbols for LTO");
129 let mut symbol_white_list = exported_symbols[&LOCAL_CRATE]
131 .filter_map(symbol_filter)
132 .collect::<Vec<CString>>();
133 timeline.record("whitelist");
134 info!("{} symbols to preserve in this crate", symbol_white_list.len());
136 // If we're performing LTO for the entire crate graph, then for each of our
137 // upstream dependencies, find the corresponding rlib and load the bitcode
140 // We save off all the bytecode and LLVM module ids for later processing
141 // with either fat or thin LTO
142 let mut upstream_modules = Vec::new();
143 if cgcx.lto != Lto::ThinLocal {
144 if cgcx.opts.cg.prefer_dynamic {
145 diag_handler.struct_err("cannot prefer dynamic linking when performing LTO")
146 .note("only 'staticlib', 'bin', and 'cdylib' outputs are \
149 return Err(FatalError)
152 // Make sure we actually can run LTO
153 for crate_type in cgcx.crate_types.iter() {
154 if !crate_type_allows_lto(*crate_type) {
155 let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \
156 static library outputs");
161 for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
162 let exported_symbols = cgcx.exported_symbols
163 .as_ref().expect("needs exported symbols for LTO");
164 symbol_white_list.extend(
165 exported_symbols[&cnum]
167 .filter_map(symbol_filter));
169 let archive = ArchiveRO::open(&path).expect("wanted an rlib");
170 let bytecodes = archive.iter().filter_map(|child| {
171 child.ok().and_then(|c| c.name().map(|name| (name, c)))
172 }).filter(|&(name, _)| name.ends_with(RLIB_BYTECODE_EXTENSION));
173 for (name, data) in bytecodes {
174 info!("adding bytecode {}", name);
175 let bc_encoded = data.data();
177 let (bc, id) = time_ext(cgcx.time_passes, None, &format!("decode {}", name), || {
178 match DecodedBytecode::new(bc_encoded) {
179 Ok(b) => Ok((b.bytecode(), b.identifier().to_string())),
180 Err(e) => Err(diag_handler.fatal(&e)),
183 let bc = SerializedModule::FromRlib(bc);
184 upstream_modules.push((bc, CString::new(id).unwrap()));
186 timeline.record(&format!("load: {}", path.display()));
190 let arr = symbol_white_list.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
192 Lto::Yes | // `-C lto` == fat LTO by default
194 fat_lto(cgcx, &diag_handler, modules, upstream_modules, &arr, timeline)
198 if cgcx.opts.debugging_opts.cross_lang_lto.enabled() {
199 unreachable!("We should never reach this case if the LTO step \
200 is deferred to the linker");
202 thin_lto(&diag_handler, modules, upstream_modules, &arr, timeline)
204 Lto::No => unreachable!(),
208 fn fat_lto(cgcx: &CodegenContext,
209 diag_handler: &Handler,
210 mut modules: Vec<ModuleCodegen>,
211 mut serialized_modules: Vec<(SerializedModule, CString)>,
212 symbol_white_list: &[*const libc::c_char],
213 timeline: &mut Timeline)
214 -> Result<Vec<LtoModuleCodegen>, FatalError>
216 info!("going for a fat lto");
218 // Find the "costliest" module and merge everything into that codegen unit.
219 // All the other modules will be serialized and reparsed into the new
220 // context, so this hopefully avoids serializing and parsing the largest
223 // Additionally use a regular module as the base here to ensure that various
224 // file copy operations in the backend work correctly. The only other kind
225 // of module here should be an allocator one, and if your crate is smaller
226 // than the allocator module then the size doesn't really matter anyway.
227 let (_, costliest_module) = modules.iter()
229 .filter(|&(_, module)| module.kind == ModuleKind::Regular)
232 llvm::LLVMRustModuleCost(module.llvm().unwrap().llmod())
237 .expect("must be codegen'ing at least one module");
238 let module = modules.remove(costliest_module);
239 let mut serialized_bitcode = Vec::new();
241 let (llcx, llmod) = {
242 let llvm = module.llvm().expect("can't lto pre-codegened modules");
243 (&llvm.llcx, llvm.llmod())
245 info!("using {:?} as a base module", module.name);
247 // The linking steps below may produce errors and diagnostics within LLVM
248 // which we'd like to handle and print, so set up our diagnostic handlers
249 // (which get unregistered when they go out of scope below).
250 let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
252 // For all other modules we codegened we'll need to link them into our own
253 // bitcode. All modules were codegened in their own LLVM context, however,
254 // and we want to move everything to the same LLVM context. Currently the
255 // way we know of to do that is to serialize them to a string and them parse
256 // them later. Not great but hey, that's why it's "fat" LTO, right?
257 for module in modules {
258 let llvm = module.llvm().expect("can't lto pre-codegened modules");
259 let buffer = ModuleBuffer::new(llvm.llmod());
260 let llmod_id = CString::new(&module.name[..]).unwrap();
261 serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
264 // For all serialized bitcode files we parse them and link them in as we did
265 // above, this is all mostly handled in C++. Like above, though, we don't
266 // know much about the memory management here so we err on the side of being
267 // save and persist everything with the original module.
268 let mut linker = Linker::new(llmod);
269 for (bc_decoded, name) in serialized_modules {
270 info!("linking {:?}", name);
271 time_ext(cgcx.time_passes, None, &format!("ll link {:?}", name), || {
272 let data = bc_decoded.data();
273 linker.add(&data).map_err(|()| {
274 let msg = format!("failed to load bc of {:?}", name);
275 write::llvm_err(&diag_handler, msg)
278 timeline.record(&format!("link {:?}", name));
279 serialized_bitcode.push(bc_decoded);
282 cgcx.save_temp_bitcode(&module, "lto.input");
284 // Internalize everything that *isn't* in our whitelist to help strip out
285 // more modules and such
287 let ptr = symbol_white_list.as_ptr();
288 llvm::LLVMRustRunRestrictionPass(llmod,
289 ptr as *const *const libc::c_char,
290 symbol_white_list.len() as libc::size_t);
291 cgcx.save_temp_bitcode(&module, "lto.after-restriction");
294 if cgcx.no_landing_pads {
296 llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
298 cgcx.save_temp_bitcode(&module, "lto.after-nounwind");
300 timeline.record("passes");
303 Ok(vec![LtoModuleCodegen::Fat {
304 module: Some(module),
305 _serialized_bitcode: serialized_bitcode,
309 struct Linker<'a>(&'a mut llvm::Linker<'a>);
312 fn new(llmod: &'a llvm::Module) -> Self {
313 unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
316 fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
318 if llvm::LLVMRustLinkerAdd(self.0,
319 bytecode.as_ptr() as *const libc::c_char,
329 impl Drop for Linker<'a> {
331 unsafe { llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _)); }
335 /// Prepare "thin" LTO to get run on these modules.
337 /// The general structure of ThinLTO is quite different from the structure of
338 /// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
339 /// one giant LLVM module, and then we run more optimization passes over this
340 /// big module after internalizing most symbols. Thin LTO, on the other hand,
341 /// avoid this large bottleneck through more targeted optimization.
343 /// At a high level Thin LTO looks like:
345 /// 1. Prepare a "summary" of each LLVM module in question which describes
346 /// the values inside, cost of the values, etc.
347 /// 2. Merge the summaries of all modules in question into one "index"
348 /// 3. Perform some global analysis on this index
349 /// 4. For each module, use the index and analysis calculated previously to
350 /// perform local transformations on the module, for example inlining
351 /// small functions from other modules.
352 /// 5. Run thin-specific optimization passes over each module, and then code
353 /// generate everything at the end.
355 /// The summary for each module is intended to be quite cheap, and the global
356 /// index is relatively quite cheap to create as well. As a result, the goal of
357 /// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
358 /// situations. For example one cheap optimization is that we can parallelize
359 /// all codegen modules, easily making use of all the cores on a machine.
361 /// With all that in mind, the function here is designed at specifically just
362 /// calculating the *index* for ThinLTO. This index will then be shared amongst
363 /// all of the `LtoModuleCodegen` units returned below and destroyed once
364 /// they all go out of scope.
365 fn thin_lto(diag_handler: &Handler,
366 modules: Vec<ModuleCodegen>,
367 serialized_modules: Vec<(SerializedModule, CString)>,
368 symbol_white_list: &[*const libc::c_char],
369 timeline: &mut Timeline)
370 -> Result<Vec<LtoModuleCodegen>, FatalError>
373 info!("going for that thin, thin LTO");
375 let mut thin_buffers = Vec::new();
376 let mut module_names = Vec::new();
377 let mut thin_modules = Vec::new();
379 // FIXME: right now, like with fat LTO, we serialize all in-memory
380 // modules before working with them and ThinLTO. We really
381 // shouldn't do this, however, and instead figure out how to
382 // extract a summary from an in-memory module and then merge that
383 // into the global index. It turns out that this loop is by far
384 // the most expensive portion of this small bit of global
386 for (i, module) in modules.iter().enumerate() {
387 info!("local module: {} - {}", i, module.name);
388 let llvm = module.llvm().expect("can't lto precodegened module");
389 let name = CString::new(module.name.clone()).unwrap();
390 let buffer = ThinBuffer::new(llvm.llmod());
391 thin_modules.push(llvm::ThinLTOModule {
392 identifier: name.as_ptr(),
393 data: buffer.data().as_ptr(),
394 len: buffer.data().len(),
396 thin_buffers.push(buffer);
397 module_names.push(name);
398 timeline.record(&module.name);
401 // FIXME: All upstream crates are deserialized internally in the
402 // function below to extract their summary and modules. Note that
403 // unlike the loop above we *must* decode and/or read something
404 // here as these are all just serialized files on disk. An
405 // improvement, however, to make here would be to store the
406 // module summary separately from the actual module itself. Right
407 // now this is store in one large bitcode file, and the entire
408 // file is deflate-compressed. We could try to bypass some of the
409 // decompression by storing the index uncompressed and only
410 // lazily decompressing the bytecode if necessary.
412 // Note that truly taking advantage of this optimization will
413 // likely be further down the road. We'd have to implement
414 // incremental ThinLTO first where we could actually avoid
415 // looking at upstream modules entirely sometimes (the contents,
416 // we must always unconditionally look at the index).
417 let mut serialized = Vec::new();
418 for (module, name) in serialized_modules {
419 info!("foreign module {:?}", name);
420 thin_modules.push(llvm::ThinLTOModule {
421 identifier: name.as_ptr(),
422 data: module.data().as_ptr(),
423 len: module.data().len(),
425 serialized.push(module);
426 module_names.push(name);
429 // Delegate to the C++ bindings to create some data here. Once this is a
430 // tried-and-true interface we may wish to try to upstream some of this
431 // to LLVM itself, right now we reimplement a lot of what they do
433 let data = llvm::LLVMRustCreateThinLTOData(
434 thin_modules.as_ptr(),
435 thin_modules.len() as u32,
436 symbol_white_list.as_ptr(),
437 symbol_white_list.len() as u32,
439 write::llvm_err(&diag_handler, "failed to prepare thin LTO context".to_string())
442 let data = ThinData(data);
443 info!("thin LTO data created");
444 timeline.record("data");
446 // Throw our data in an `Arc` as we'll be sharing it across threads. We
447 // also put all memory referenced by the C++ data (buffers, ids, etc)
448 // into the arc as well. After this we'll create a thin module
449 // codegen per module in this data.
450 let shared = Arc::new(ThinShared {
453 serialized_modules: serialized,
456 Ok((0..shared.module_names.len()).map(|i| {
457 LtoModuleCodegen::Thin(ThinModule {
458 shared: shared.clone(),
465 fn run_pass_manager(cgcx: &CodegenContext,
466 tm: &llvm::TargetMachine,
467 llmod: &llvm::Module,
468 config: &ModuleConfig,
470 // Now we have one massive module inside of llmod. Time to run the
471 // LTO-specific optimization passes that LLVM provides.
473 // This code is based off the code found in llvm's LTO code generator:
474 // tools/lto/LTOCodeGenerator.cpp
475 debug!("running the pass manager");
477 let pm = llvm::LLVMCreatePassManager();
478 llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod);
480 if config.verify_llvm_ir {
481 let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _);
482 llvm::LLVMRustAddPass(pm, pass.unwrap());
485 // When optimizing for LTO we don't actually pass in `-O0`, but we force
486 // it to always happen at least with `-O1`.
488 // With ThinLTO we mess around a lot with symbol visibility in a way
489 // that will actually cause linking failures if we optimize at O0 which
490 // notable is lacking in dead code elimination. To ensure we at least
491 // get some optimizations and correctly link we forcibly switch to `-O1`
492 // to get dead code elimination.
494 // Note that in general this shouldn't matter too much as you typically
495 // only turn on ThinLTO when you're compiling with optimizations
497 let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None);
498 let opt_level = match opt_level {
499 llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less,
502 with_llvm_pmb(llmod, config, opt_level, false, &mut |b| {
504 if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) {
505 panic!("this version of LLVM does not support ThinLTO");
508 llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm,
509 /* Internalize = */ False,
510 /* RunInliner = */ True);
514 if config.verify_llvm_ir {
515 let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _);
516 llvm::LLVMRustAddPass(pm, pass.unwrap());
519 time_ext(cgcx.time_passes, None, "LTO passes", ||
520 llvm::LLVMRunPassManager(pm, llmod));
522 llvm::LLVMDisposePassManager(pm);
527 pub enum SerializedModule {
532 impl SerializedModule {
533 fn data(&self) -> &[u8] {
535 SerializedModule::Local(ref m) => m.data(),
536 SerializedModule::FromRlib(ref m) => m,
541 pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
543 unsafe impl Send for ModuleBuffer {}
544 unsafe impl Sync for ModuleBuffer {}
547 pub fn new(m: &llvm::Module) -> ModuleBuffer {
548 ModuleBuffer(unsafe {
549 llvm::LLVMRustModuleBufferCreate(m)
553 pub fn data(&self) -> &[u8] {
555 let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
556 let len = llvm::LLVMRustModuleBufferLen(self.0);
557 slice::from_raw_parts(ptr, len)
562 impl Drop for ModuleBuffer {
564 unsafe { llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _)); }
568 pub struct ThinModule {
569 shared: Arc<ThinShared>,
575 thin_buffers: Vec<ThinBuffer>,
576 serialized_modules: Vec<SerializedModule>,
577 module_names: Vec<CString>,
580 struct ThinData(&'static mut llvm::ThinLTOData);
582 unsafe impl Send for ThinData {}
583 unsafe impl Sync for ThinData {}
585 impl Drop for ThinData {
588 llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
593 pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
595 unsafe impl Send for ThinBuffer {}
596 unsafe impl Sync for ThinBuffer {}
599 pub fn new(m: &llvm::Module) -> ThinBuffer {
601 let buffer = llvm::LLVMRustThinLTOBufferCreate(m);
606 pub fn data(&self) -> &[u8] {
608 let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
609 let len = llvm::LLVMRustThinLTOBufferLen(self.0);
610 slice::from_raw_parts(ptr, len)
615 impl Drop for ThinBuffer {
618 llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
624 fn name(&self) -> &str {
625 self.shared.module_names[self.idx].to_str().unwrap()
628 fn cost(&self) -> u64 {
629 // Yes, that's correct, we're using the size of the bytecode as an
630 // indicator for how costly this codegen unit is.
631 self.data().len() as u64
634 fn data(&self) -> &[u8] {
635 let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data());
636 a.unwrap_or_else(|| {
637 let len = self.shared.thin_buffers.len();
638 self.shared.serialized_modules[self.idx - len].data()
642 unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline)
643 -> Result<ModuleCodegen, FatalError>
645 let diag_handler = cgcx.create_diag_handler();
646 let tm = (cgcx.tm_factory)().map_err(|e| {
647 write::llvm_err(&diag_handler, e)
650 // Right now the implementation we've got only works over serialized
651 // modules, so we create a fresh new LLVM context and parse the module
652 // into that context. One day, however, we may do this for upstream
653 // crates but for locally codegened modules we may be able to reuse
654 // that LLVM Context and Module.
655 let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
656 let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO(
658 self.data().as_ptr(),
660 self.shared.module_names[self.idx].as_ptr(),
662 let msg = "failed to parse bitcode for thin LTO module".to_string();
663 write::llvm_err(&diag_handler, msg)
665 let module = ModuleCodegen {
666 source: ModuleSource::Codegened(ModuleLlvm {
671 name: self.name().to_string(),
672 kind: ModuleKind::Regular,
675 let llmod = module.llvm().unwrap().llmod();
676 cgcx.save_temp_bitcode(&module, "thin-lto-input");
678 // Before we do much else find the "main" `DICompileUnit` that we'll be
679 // using below. If we find more than one though then rustc has changed
680 // in a way we're not ready for, so generate an ICE by returning
682 let mut cu1 = ptr::null_mut();
683 let mut cu2 = ptr::null_mut();
684 llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
686 let msg = "multiple source DICompileUnits found".to_string();
687 return Err(write::llvm_err(&diag_handler, msg))
690 // Like with "fat" LTO, get some better optimizations if landing pads
691 // are disabled by removing all landing pads.
692 if cgcx.no_landing_pads {
693 llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
694 cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind");
695 timeline.record("nounwind");
698 // Up next comes the per-module local analyses that we do for Thin LTO.
699 // Each of these functions is basically copied from the LLVM
700 // implementation and then tailored to suit this implementation. Ideally
701 // each of these would be supported by upstream LLVM but that's perhaps
702 // a patch for another day!
704 // You can find some more comments about these functions in the LLVM
705 // bindings we've got (currently `PassWrapper.cpp`)
706 if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) {
707 let msg = "failed to prepare thin LTO module".to_string();
708 return Err(write::llvm_err(&diag_handler, msg))
710 cgcx.save_temp_bitcode(&module, "thin-lto-after-rename");
711 timeline.record("rename");
712 if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) {
713 let msg = "failed to prepare thin LTO module".to_string();
714 return Err(write::llvm_err(&diag_handler, msg))
716 cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve");
717 timeline.record("resolve");
718 if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) {
719 let msg = "failed to prepare thin LTO module".to_string();
720 return Err(write::llvm_err(&diag_handler, msg))
722 cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize");
723 timeline.record("internalize");
724 if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) {
725 let msg = "failed to prepare thin LTO module".to_string();
726 return Err(write::llvm_err(&diag_handler, msg))
728 cgcx.save_temp_bitcode(&module, "thin-lto-after-import");
729 timeline.record("import");
731 // Ok now this is a bit unfortunate. This is also something you won't
732 // find upstream in LLVM's ThinLTO passes! This is a hack for now to
733 // work around bugs in LLVM.
735 // First discovered in #45511 it was found that as part of ThinLTO
736 // importing passes LLVM will import `DICompileUnit` metadata
737 // information across modules. This means that we'll be working with one
738 // LLVM module that has multiple `DICompileUnit` instances in it (a
739 // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of
740 // bugs in LLVM's backend which generates invalid DWARF in a situation
743 // https://bugs.llvm.org/show_bug.cgi?id=35212
744 // https://bugs.llvm.org/show_bug.cgi?id=35562
746 // While the first bug there is fixed the second ended up causing #46346
747 // which was basically a resurgence of #45511 after LLVM's bug 35212 was
750 // This function below is a huge hack around this problem. The function
751 // below is defined in `PassWrapper.cpp` and will basically "merge"
752 // all `DICompileUnit` instances in a module. Basically it'll take all
753 // the objects, rewrite all pointers of `DISubprogram` to point to the
754 // first `DICompileUnit`, and then delete all the other units.
756 // This is probably mangling to the debug info slightly (but hopefully
757 // not too much) but for now at least gets LLVM to emit valid DWARF (or
758 // so it appears). Hopefully we can remove this once upstream bugs are
760 llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
761 cgcx.save_temp_bitcode(&module, "thin-lto-after-patch");
762 timeline.record("patch");
764 // Alright now that we've done everything related to the ThinLTO
765 // analysis it's time to run some optimizations! Here we use the same
766 // `run_pass_manager` as the "fat" LTO above except that we tell it to
767 // populate a thin-specific pass manager, which presumably LLVM treats a
768 // little differently.
769 info!("running thin lto passes over {}", module.name);
770 let config = cgcx.config(module.kind);
771 run_pass_manager(cgcx, module.llvm().unwrap().tm, llmod, config, true);
772 cgcx.save_temp_bitcode(&module, "thin-lto-after-pm");
773 timeline.record("thin-done");