// src/librustc_codegen_llvm/back/write.rs
use crate::attributes;
use crate::back::bytecode;
use crate::back::lto::ThinBuffer;
use crate::base;
use crate::common;
use crate::consts;
use crate::context::{get_reloc_model, is_pie_binary};
use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
use crate::llvm_util;
use crate::type_::Type;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
use log::debug;
use rustc::bug;
use rustc::session::config::{self, Lto, OutputType, Passes, Sanitizer, SwitchWithOptPath};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_codegen_ssa::back::write::{run_assembler, CodegenContext, ModuleConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, RLIB_BYTECODE_EXTENSION};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{FatalError, Handler};
use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_hir::def_id::LOCAL_CRATE;

use libc::{c_char, c_int, c_uint, c_void, size_t};
use std::ffi::CString;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::slice;
use std::str;
use std::sync::Arc;

pub const RELOC_MODEL_ARGS: [(&str, llvm::RelocMode); 7] = [
    ("pic", llvm::RelocMode::PIC),
    ("static", llvm::RelocMode::Static),
    ("default", llvm::RelocMode::Default),
    ("dynamic-no-pic", llvm::RelocMode::DynamicNoPic),
    ("ropi", llvm::RelocMode::ROPI),
    ("rwpi", llvm::RelocMode::RWPI),
    ("ropi-rwpi", llvm::RelocMode::ROPI_RWPI),
];

pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[
    ("small", llvm::CodeModel::Small),
    ("kernel", llvm::CodeModel::Kernel),
    ("medium", llvm::CodeModel::Medium),
    ("large", llvm::CodeModel::Large),
];

pub const TLS_MODEL_ARGS: [(&str, llvm::ThreadLocalMode); 4] = [
    ("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic),
    ("local-dynamic", llvm::ThreadLocalMode::LocalDynamic),
    ("initial-exec", llvm::ThreadLocalMode::InitialExec),
    ("local-exec", llvm::ThreadLocalMode::LocalExec),
];

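/// Reports an LLVM error as a fatal error through the given handler, appending
/// LLVM's own last error message to `msg` when one is available.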
pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
    match llvm::last_error() {
        Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
        None => handler.fatal(&msg),
    }
}

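/// Runs the given codegen pass manager over module `m` on `target`, writing the
/// resulting assembly or object file to `output`.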
pub fn write_output_file(
    handler: &rustc_errors::Handler,
    target: &'ll llvm::TargetMachine,
    pm: &llvm::PassManager<'ll>,
    m: &'ll llvm::Module,
    output: &Path,
    file_type: llvm::FileType,
) -> Result<(), FatalError> {
    unsafe {
        let output_c = path_to_c_string(output);
        let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
        result.into_result().map_err(|()| {
            let msg = format!("could not write output to {}", output.display());
            llvm_err(handler, &msg)
        })
    }
}

pub fn create_informational_target_machine(
    sess: &Session,
    find_features: bool,
) -> &'static mut llvm::TargetMachine {
    target_machine_factory(sess, config::OptLevel::No, find_features)()
        .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
}

pub fn create_target_machine(
    tcx: TyCtxt<'_>,
    find_features: bool,
) -> &'static mut llvm::TargetMachine {
    target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
        .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
}

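/// Maps a rustc `-C opt-level` setting to the corresponding LLVM codegen
/// optimization level and size level. The size-oriented levels keep the default
/// speed level but request increasingly aggressive size optimization.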
pub fn to_llvm_opt_settings(
    cfg: config::OptLevel,
) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
    use self::config::OptLevel::*;
    match cfg {
        No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
        Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
        Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
        Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
        Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
        SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
    }
}

fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
    use config::OptLevel::*;
    match cfg {
        No => llvm::PassBuilderOptLevel::O0,
        Less => llvm::PassBuilderOptLevel::O1,
        Default => llvm::PassBuilderOptLevel::O2,
        Aggressive => llvm::PassBuilderOptLevel::O3,
        Size => llvm::PassBuilderOptLevel::Os,
        SizeMin => llvm::PassBuilderOptLevel::Oz,
    }
}

// If `find_features` is true, this won't access `sess.crate_types` and instead
// assumes that `is_pie_binary` is false. While we are discovering the LLVM
// target features, `sess.crate_types` is not yet initialized, so we must not
// access it.
pub fn target_machine_factory(
    sess: &Session,
    optlvl: config::OptLevel,
    find_features: bool,
) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
    let reloc_model = get_reloc_model(sess);

    let (opt_level, _) = to_llvm_opt_settings(optlvl);
    let use_softfp = sess.opts.cg.soft_float;

    let ffunction_sections = sess.target.target.options.function_sections;
    let fdata_sections = ffunction_sections;

    let code_model_arg =
        sess.opts.cg.code_model.as_ref().or(sess.target.target.options.code_model.as_ref());

    let code_model = match code_model_arg {
        Some(s) => match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
            Some(x) => x.1,
            _ => {
                sess.err(&format!("{:?} is not a valid code model", code_model_arg));
                sess.abort_if_errors();
                bug!();
            }
        },
        None => llvm::CodeModel::None,
    };

    let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
    let mut singlethread = sess.target.target.options.singlethread;

    // On the wasm target, once the `atomics` feature is enabled we are no
    // longer single-threaded, and we don't want LLVM to lower atomic
    // operations to their single-threaded equivalents.
    if singlethread
        && sess.target.target.llvm_target.contains("wasm32")
        && features.iter().any(|s| *s == "+atomics")
    {
        singlethread = false;
    }

    let triple = SmallCStr::new(&sess.target.target.llvm_target);
    let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
    let features = features.join(",");
    let features = CString::new(features).unwrap();
    let abi = SmallCStr::new(&sess.target.target.options.llvm_abiname);
    let is_pie_binary = !find_features && is_pie_binary(sess);
    let trap_unreachable = sess.target.target.options.trap_unreachable;
    let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;

    let asm_comments = sess.asm_comments();
    let relax_elf_relocations = sess.target.target.options.relax_elf_relocations;
    Arc::new(move || {
        let tm = unsafe {
            llvm::LLVMRustCreateTargetMachine(
                triple.as_ptr(),
                cpu.as_ptr(),
                features.as_ptr(),
                abi.as_ptr(),
                code_model,
                reloc_model,
                opt_level,
                use_softfp,
                is_pie_binary,
                ffunction_sections,
                fdata_sections,
                trap_unreachable,
                singlethread,
                asm_comments,
                emit_stack_size_section,
                relax_elf_relocations,
            )
        };

        tm.ok_or_else(|| {
            format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
        })
    })
}

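/// When `cgcx.save_temps` is set, writes the module's current bitcode to a
/// temporary file whose extension includes `name`, so intermediate states of
/// the module can be inspected; otherwise this is a no-op.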
pub(crate) fn save_temp_bitcode(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    module: &ModuleCodegen<ModuleLlvm>,
    name: &str,
) {
    if !cgcx.save_temps {
        return;
    }
    unsafe {
        let ext = format!("{}.bc", name);
        let cgu = Some(&module.name[..]);
        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
        let cstr = path_to_c_string(&path);
        let llmod = module.module_llvm.llmod();
        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
    }
}

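/// RAII guard that installs rustc's inline-asm and diagnostic callbacks on an
/// LLVM context, and unregisters them again (freeing the shared callback data)
/// when dropped.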
pub struct DiagnosticHandlers<'a> {
    data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
    llcx: &'a llvm::Context,
}

impl<'a> DiagnosticHandlers<'a> {
    pub fn new(
        cgcx: &'a CodegenContext<LlvmCodegenBackend>,
        handler: &'a Handler,
        llcx: &'a llvm::Context,
    ) -> Self {
        let data = Box::into_raw(Box::new((cgcx, handler)));
        unsafe {
            llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
            llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
        }
        DiagnosticHandlers { data, llcx }
    }
}

impl<'a> Drop for DiagnosticHandlers<'a> {
    fn drop(&mut self) {
        use std::ptr::null_mut;
        unsafe {
            llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
            llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
            drop(Box::from_raw(self.data));
        }
    }
}

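// Forwards an inline-asm error reported by LLVM to the shared diagnostic
// emitter, keyed by the cookie LLVM associated with the offending asm.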
unsafe extern "C" fn report_inline_asm(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    msg: &str,
    cookie: c_uint,
) {
    cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}

unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
    if user.is_null() {
        return;
    }
    let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));

    let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
        .expect("non-UTF8 SMDiagnostic");

    report_inline_asm(cgcx, &msg, cookie);
}

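// Main diagnostic callback registered with the LLVM context: dispatches inline
// asm errors, optimization remarks (filtered by `cgcx.remark`), and PGO/linker
// diagnostics to the appropriate rustc handlers.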
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
    if user.is_null() {
        return;
    }
    let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));

    match llvm::diagnostic::Diagnostic::unpack(info) {
        llvm::diagnostic::InlineAsm(inline) => {
            report_inline_asm(cgcx, &llvm::twine_to_string(inline.message), inline.cookie);
        }

        llvm::diagnostic::Optimization(opt) => {
            let enabled = match cgcx.remark {
                Passes::All => true,
                Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
            };

            if enabled {
                diag_handler.note_without_error(&format!(
                    "optimization {} for {} at {}:{}:{}: {}",
                    opt.kind.describe(),
                    opt.pass_name,
                    opt.filename,
                    opt.line,
                    opt.column,
                    opt.message
                ));
            }
        }
        llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
            let msg = llvm::build_string(|s| {
                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
            })
            .expect("non-UTF8 diagnostic");
            diag_handler.warn(&msg);
        }
        llvm::diagnostic::UnknownDiagnostic(..) => {}
    }
}

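/// Computes the `default_%m.profraw` output path handed to LLVM when profile
/// generation is enabled, rooted at the requested directory if one was given.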
fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
    match config.pgo_gen {
        SwitchWithOptPath::Enabled(ref opt_dir_path) => {
            let path = if let Some(dir_path) = opt_dir_path {
                dir_path.join("default_%m.profraw")
            } else {
                PathBuf::from("default_%m.profraw")
            };

            Some(CString::new(format!("{}", path.display())).unwrap())
        }
        SwitchWithOptPath::Disabled => None,
    }
}

fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
    config
        .pgo_use
        .as_ref()
        .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
}

pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool {
    // We only support the new pass manager starting with LLVM 9.
    if llvm_util::get_major_version() < 9 {
        return false;
    }

    // The new pass manager is disabled by default.
    config.new_llvm_pass_manager.unwrap_or(false)
}

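/// Runs the optimization pipeline through LLVM's new pass manager (available
/// from LLVM 9 onward), translating the module config (opt level, LTO stage,
/// sanitizers, PGO paths, vectorization settings) into arguments for
/// `LLVMRustOptimizeWithNewPassManager`.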
pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
    module: &ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
    opt_level: config::OptLevel,
    opt_stage: llvm::OptStage,
) {
    let unroll_loops =
        opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
    let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
    let pgo_gen_path = get_pgo_gen_path(config);
    let pgo_use_path = get_pgo_use_path(config);
    let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
    // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
    let sanitizer_options = if !is_lto {
        config.sanitizer.as_ref().map(|s| llvm::SanitizerOptions {
            sanitize_memory: *s == Sanitizer::Memory,
            sanitize_thread: *s == Sanitizer::Thread,
            sanitize_address: *s == Sanitizer::Address,
            sanitize_recover: config.sanitizer_recover.contains(s),
            sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
        })
    } else {
        None
    };

    // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
    // We would have to add upstream support for this first, before we can support
    // config.inline_threshold and our more aggressive default thresholds.
    // FIXME: NewPM uses a different and more explicit way to textually represent
    // pass pipelines. It would probably make sense to expose this, but it would
    // require a different format than the current -C passes.
    llvm::LLVMRustOptimizeWithNewPassManager(
        module.module_llvm.llmod(),
        &*module.module_llvm.tm,
        to_pass_builder_opt_level(opt_level),
        opt_stage,
        config.no_prepopulate_passes,
        config.verify_llvm_ir,
        using_thin_buffers,
        config.merge_functions,
        unroll_loops,
        config.vectorize_slp,
        config.vectorize_loop,
        config.no_builtins,
        sanitizer_options.as_ref(),
        pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
        pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
    );
}

// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    module: &ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
) -> Result<(), FatalError> {
    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &module.name[..]);

    let llmod = module.module_llvm.llmod();
    let llcx = &*module.module_llvm.llcx;
    let tm = &*module.module_llvm.tm;
    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);

    let module_name = module.name.clone();
    let module_name = Some(&module_name[..]);

    if config.emit_no_opt_bc {
        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
        let out = path_to_c_string(&out);
        llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
    }

    if let Some(opt_level) = config.opt_level {
        if should_use_new_llvm_pass_manager(config) {
            let opt_stage = match cgcx.lto {
                Lto::Fat => llvm::OptStage::PreLinkFatLTO,
                Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
                _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
                _ => llvm::OptStage::PreLinkNoLTO,
            };
            optimize_with_new_llvm_pass_manager(module, config, opt_level, opt_stage);
            return Ok(());
        }

        // Create the two optimizing pass managers. These mirror what clang
        // does, and are populated by LLVM's default PassManagerBuilder.
        // Each manager has a different set of passes, but they also share
        // some common passes.
        let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
        let mpm = llvm::LLVMCreatePassManager();

        {
            let find_pass = |pass_name: &str| {
                let pass_name = SmallCStr::new(pass_name);
                llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
            };

            if config.verify_llvm_ir {
                // Verification should run as the very first pass.
                llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
            }

            let mut extra_passes = Vec::new();
            let mut have_name_anon_globals_pass = false;

            for pass_name in &config.passes {
                if pass_name == "lint" {
                    // Linting should also be performed early, directly on the generated IR.
                    llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
                    continue;
                }

                if let Some(pass) = find_pass(pass_name) {
                    extra_passes.push(pass);
                } else {
                    diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
                }

                if pass_name == "name-anon-globals" {
                    have_name_anon_globals_pass = true;
                }
            }

            add_sanitizer_passes(config, &mut extra_passes);

            // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
            // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
            // we'll get errors in LLVM.
            let using_thin_buffers = config.bitcode_needed();
            if !config.no_prepopulate_passes {
                llvm::LLVMAddAnalysisPasses(tm, fpm);
                llvm::LLVMAddAnalysisPasses(tm, mpm);
                let opt_level = to_llvm_opt_settings(opt_level).0;
                let prepare_for_thin_lto = cgcx.lto == Lto::Thin
                    || cgcx.lto == Lto::ThinLocal
                    || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
                with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
                    llvm::LLVMRustAddLastExtensionPasses(
                        b,
                        extra_passes.as_ptr(),
                        extra_passes.len() as size_t,
                    );
                    llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
                    llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
                });

                have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
                if using_thin_buffers && !prepare_for_thin_lto {
                    llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
                    have_name_anon_globals_pass = true;
                }
            } else {
                // If we don't use the standard pipeline, directly populate the MPM
                // with the extra passes.
                for pass in extra_passes {
                    llvm::LLVMRustAddPass(mpm, pass);
                }
            }

            if using_thin_buffers && !have_name_anon_globals_pass {
                // As described above, this will probably cause an error in LLVM
                if config.no_prepopulate_passes {
                    diag_handler.err(
                        "The current compilation is going to use thin LTO buffers \
                                      without running LLVM's NameAnonGlobals pass. \
                                      This will likely cause errors in LLVM. Consider adding \
                                      -C passes=name-anon-globals to the compiler command line.",
                    );
                } else {
                    bug!(
                        "We are using thin LTO buffers without running the NameAnonGlobals pass. \
                          This will likely cause errors in LLVM and should never happen."
                    );
                }
            }
        }

        diag_handler.abort_if_errors();

        // Finally, run the actual optimization passes
        {
            let _timer = cgcx.prof.extra_verbose_generic_activity(
                "LLVM_module_optimize_function_passes",
                &module.name[..],
            );
            llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
        }
        {
            let _timer = cgcx.prof.extra_verbose_generic_activity(
                "LLVM_module_optimize_module_passes",
                &module.name[..],
            );
            llvm::LLVMRunPassManager(mpm, llmod);
        }

        // Deallocate managers that we're now done with
        llvm::LLVMDisposePassManager(fpm);
        llvm::LLVMDisposePassManager(mpm);
    }
    Ok(())
}

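/// Pushes the legacy-pass-manager sanitizer passes corresponding to
/// `config.sanitizer` (AddressSanitizer, MemorySanitizer, or ThreadSanitizer)
/// onto `passes`; LeakSanitizer needs no extra LLVM pass here.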
unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
    let sanitizer = match &config.sanitizer {
        None => return,
        Some(s) => s,
    };

    let recover = config.sanitizer_recover.contains(sanitizer);
    match sanitizer {
        Sanitizer::Address => {
            passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
            passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
        }
        Sanitizer::Memory => {
            let track_origins = config.sanitizer_memory_track_origins as c_int;
            passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
        }
        Sanitizer::Thread => {
            passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
        }
        Sanitizer::Leak => {}
    }
}

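/// Lowers an already-optimized LLVM module into the final artifacts requested by
/// `config`: bitcode (plain, compressed, or embedded), LLVM IR, assembly, and/or
/// an object file, then packages the result as a `CompiledModule`.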
pub(crate) unsafe fn codegen(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    module: ModuleCodegen<ModuleLlvm>,
    config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
    {
        let llmod = module.module_llvm.llmod();
        let llcx = &*module.module_llvm.llcx;
        let tm = &*module.module_llvm.tm;
        let module_name = module.name.clone();
        let module_name = Some(&module_name[..]);
        let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);

        if cgcx.msvc_imps_needed {
            create_msvc_imps(cgcx, llcx, llmod);
        }

        // A codegen-specific pass manager is used to generate object
        // files for an LLVM module.
        //
        // Apparently each of these pass managers is a one-shot kind of
        // thing, so we create a new one for each type of output. The
        // pass manager passed to the closure must not escape the
        // closure itself, and each manager should only be used once.
        unsafe fn with_codegen<'ll, F, R>(
            tm: &'ll llvm::TargetMachine,
            llmod: &'ll llvm::Module,
            no_builtins: bool,
            f: F,
        ) -> R
        where
            F: FnOnce(&'ll mut PassManager<'ll>) -> R,
        {
            let cpm = llvm::LLVMCreatePassManager();
            llvm::LLVMAddAnalysisPasses(tm, cpm);
            llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
            f(cpm)
        }

        // If we don't have the integrated assembler, then we need to emit asm
        // from LLVM and use `gcc` to create the object file.
        let asm_to_obj = config.emit_obj && config.no_integrated_as;

        // Change what we write, and what we clean up, based on whether the
        // object files are just LLVM bitcode. In that case we write the
        // bitcode, and possibly delete it afterwards if it wasn't explicitly
        // requested. We don't generate machine code; instead the .o file is
        // produced by copying the .bc file.
        let write_bc = config.emit_bc || config.obj_is_bitcode;
        let rm_bc = !config.emit_bc && config.obj_is_bitcode;
        let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
        let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;

        let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);

        if write_bc || config.emit_bc_compressed || config.embed_bitcode {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
            let thin = ThinBuffer::new(llmod);
            let data = thin.data();

            if write_bc {
                let _timer = cgcx.prof.generic_activity_with_arg(
                    "LLVM_module_codegen_emit_bitcode",
                    &module.name[..],
                );
                if let Err(e) = fs::write(&bc_out, data) {
                    let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
                    diag_handler.err(&msg);
                }
            }

            if config.embed_bitcode {
                let _timer = cgcx.prof.generic_activity_with_arg(
                    "LLVM_module_codegen_embed_bitcode",
                    &module.name[..],
                );
                embed_bitcode(cgcx, llcx, llmod, Some(data));
            }

            if config.emit_bc_compressed {
                let _timer = cgcx.prof.generic_activity_with_arg(
                    "LLVM_module_codegen_emit_compressed_bitcode",
                    &module.name[..],
                );
                let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
                let data = bytecode::encode(&module.name, data);
                if let Err(e) = fs::write(&dst, data) {
                    let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
                    diag_handler.err(&msg);
                }
            }
        } else if config.embed_bitcode_marker {
            embed_bitcode(cgcx, llcx, llmod, None);
        }

        {
            if config.emit_ir {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
                let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
                let out_c = path_to_c_string(&out);

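                // Callback used by LLVM when printing the module as textual IR: it
                // demangles the Rust symbol name in `input` into `output` and returns
                // the number of bytes written, or 0 if demangling or writing fails.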
                extern "C" fn demangle_callback(
                    input_ptr: *const c_char,
                    input_len: size_t,
                    output_ptr: *mut c_char,
                    output_len: size_t,
                ) -> size_t {
                    let input = unsafe {
                        slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
                    };

                    let input = match str::from_utf8(input) {
                        Ok(s) => s,
                        Err(_) => return 0,
                    };

                    let output = unsafe {
                        slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
                    };
                    let mut cursor = io::Cursor::new(output);

                    let demangled = match rustc_demangle::try_demangle(input) {
                        Ok(d) => d,
                        Err(_) => return 0,
                    };

                    if let Err(_) = write!(cursor, "{:#}", demangled) {
                        // This is only possible if the provided buffer is not big enough.
                        return 0;
                    }

                    cursor.position() as size_t
                }

                let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
                result.into_result().map_err(|()| {
                    let msg = format!("failed to write LLVM IR to {}", out.display());
                    llvm_err(diag_handler, &msg)
                })?;
            }

            if config.emit_asm || asm_to_obj {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
                let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);

                // We can't use the same module for both asm and binary output, because that
                // triggers various errors like invalid IR or broken binaries, so we may have to
                // clone the module to produce the asm output.
                let llmod = if config.emit_obj { llvm::LLVMCloneModule(llmod) } else { llmod };
                with_codegen(tm, llmod, config.no_builtins, |cpm| {
                    write_output_file(
                        diag_handler,
                        tm,
                        cpm,
                        llmod,
                        &path,
                        llvm::FileType::AssemblyFile,
                    )
                })?;
            }

            if write_obj {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
                with_codegen(tm, llmod, config.no_builtins, |cpm| {
                    write_output_file(
                        diag_handler,
                        tm,
                        cpm,
                        llmod,
                        &obj_out,
                        llvm::FileType::ObjectFile,
                    )
                })?;
            } else if asm_to_obj {
                let _timer = cgcx
                    .prof
                    .generic_activity_with_arg("LLVM_module_codegen_asm_to_obj", &module.name[..]);
                let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
                run_assembler(cgcx, diag_handler, &assembly, &obj_out);

                if !config.emit_asm && !cgcx.save_temps {
                    drop(fs::remove_file(&assembly));
                }
            }
        }

        if copy_bc_to_obj {
            debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
            if let Err(e) = link_or_copy(&bc_out, &obj_out) {
                diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
            }
        }

        if rm_bc {
            debug!("removing_bitcode {:?}", bc_out);
            if let Err(e) = fs::remove_file(&bc_out) {
                diag_handler.err(&format!("failed to remove bitcode: {}", e));
            }
        }

        drop(handlers);
    }
    Ok(module.into_compiled_module(
        config.emit_obj,
        config.emit_bc,
        config.emit_bc_compressed,
        &cgcx.output_filenames,
    ))
}

/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
/// code at least with `-fembed-bitcode` which creates two sections in the
/// executable:
///
/// * __LLVM,__bitcode
/// * __LLVM,__cmdline
///
/// It appears *both* of these sections are necessary to get the linker to
/// recognize what's going on. For us though we just always throw in an empty
/// cmdline section.
///
/// Furthermore debug/O1 builds don't actually embed bitcode but rather just
/// embed an empty section.
///
/// Basically all of this is us attempting to follow in the footsteps of clang
/// on iOS. See #35968 for lots more info.
unsafe fn embed_bitcode(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    llcx: &llvm::Context,
    llmod: &llvm::Module,
    bitcode: Option<&[u8]>,
) {
    let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
    let llglobal = llvm::LLVMAddGlobal(
        llmod,
        common::val_ty(llconst),
        "rustc.embedded.module\0".as_ptr().cast(),
    );
    llvm::LLVMSetInitializer(llglobal, llconst);

    let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
        || cgcx.opts.target_triple.triple().contains("-darwin");

    let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
    llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
    llvm::LLVMSetGlobalConstant(llglobal, llvm::True);

    let llconst = common::bytes_in_context(llcx, &[]);
    let llglobal = llvm::LLVMAddGlobal(
        llmod,
        common::val_ty(llconst),
        "rustc.embedded.cmdline\0".as_ptr().cast(),
    );
    llvm::LLVMSetInitializer(llglobal, llconst);
    let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
    llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
    llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
}

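/// Creates an LLVM `PassManagerBuilder`, configures it from `config` (opt and
/// size levels, vectorization, inlining thresholds, ThinLTO preparation, PGO
/// paths), hands it to `f` so the caller can populate its pass managers, and
/// then disposes of the builder.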
pub unsafe fn with_llvm_pmb(
    llmod: &llvm::Module,
    config: &ModuleConfig,
    opt_level: llvm::CodeGenOptLevel,
    prepare_for_thin_lto: bool,
    f: &mut dyn FnMut(&llvm::PassManagerBuilder),
) {
    use std::ptr;

    // Create the PassManagerBuilder for LLVM. We configure it with
    // reasonable defaults and prepare it to actually populate the pass
    // manager.
    let builder = llvm::LLVMPassManagerBuilderCreate();
    let opt_size =
        config.opt_size.map(|x| to_llvm_opt_settings(x).1).unwrap_or(llvm::CodeGenOptSizeNone);
    let inline_threshold = config.inline_threshold;
    let pgo_gen_path = get_pgo_gen_path(config);
    let pgo_use_path = get_pgo_use_path(config);

    llvm::LLVMRustConfigurePassManagerBuilder(
        builder,
        opt_level,
        config.merge_functions,
        config.vectorize_slp,
        config.vectorize_loop,
        prepare_for_thin_lto,
        pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
        pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
    );

    llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);

    if opt_size != llvm::CodeGenOptSizeNone {
        llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
    }

    llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);

    // Here we match what clang does (kinda). For O0 we only inline
    // always-inline functions (but don't add lifetime intrinsics), at O1 we
    // inline with lifetime intrinsics, and at O2+ we add an inliner with
    // thresholds copied from clang.
    match (opt_level, opt_size, inline_threshold) {
        (.., Some(t)) => {
            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
        }
        (llvm::CodeGenOptLevel::Aggressive, ..) => {
            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
        }
        (_, llvm::CodeGenOptSizeDefault, _) => {
            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
        }
        (_, llvm::CodeGenOptSizeAggressive, _) => {
            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
        }
        (llvm::CodeGenOptLevel::None, ..) => {
            llvm::LLVMRustAddAlwaysInlinePass(builder, false);
        }
        (llvm::CodeGenOptLevel::Less, ..) => {
            llvm::LLVMRustAddAlwaysInlinePass(builder, true);
        }
        (llvm::CodeGenOptLevel::Default, ..) => {
            llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
        }
        (llvm::CodeGenOptLevel::Other, ..) => bug!("CodeGenOptLevel::Other selected"),
    }

    f(builder);
    llvm::LLVMPassManagerBuilderDispose(builder);
}

// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using the MSVC linker. We do this only for data, as the linker can fix up
// code references on its own.
// See #26591, #27438.
fn create_msvc_imps(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    llcx: &llvm::Context,
    llmod: &llvm::Module,
) {
    if !cgcx.msvc_imps_needed {
        return;
    }
    // The x86 ABI seems to require that leading underscores are added to symbol
    // names, so we need an extra underscore on x86. There's also a leading
    // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
    // underscores added in front).
    let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };

    unsafe {
        let i8p_ty = Type::i8p_llcx(llcx);
        let globals = base::iter_globals(llmod)
            .filter(|&val| {
                llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
                    && llvm::LLVMIsDeclaration(val) == 0
            })
            .filter_map(|val| {
                // Exclude some symbols that we know are not Rust symbols.
                let name = llvm::get_value_name(val);
                if ignored(name) { None } else { Some((val, name)) }
            })
            .map(move |(val, name)| {
                let mut imp_name = prefix.as_bytes().to_vec();
                imp_name.extend(name);
                let imp_name = CString::new(imp_name).unwrap();
                (imp_name, val)
            })
            .collect::<Vec<_>>();

        for (imp_name, val) in globals {
            let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
            llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
        }
    }

    // Use this function to exclude certain symbols from `__imp` generation.
    fn ignored(symbol_name: &[u8]) -> bool {
        // These are symbols generated by LLVM's profiling instrumentation
        symbol_name.starts_with(b"__llvm_profile_")
    }
}
971 }