// src/librustc_codegen_llvm/back/write.rs
1 use crate::attributes;
2 use crate::back::bytecode;
3 use crate::back::lto::ThinBuffer;
4 use crate::back::profiling::{
5     selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
6 };
7 use crate::base;
8 use crate::common;
9 use crate::consts;
10 use crate::context::{get_reloc_model, is_pie_binary};
11 use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
12 use crate::llvm_util;
13 use crate::type_::Type;
14 use crate::LlvmCodegenBackend;
15 use crate::ModuleLlvm;
16 use log::debug;
17 use rustc::bug;
18 use rustc::session::config::{self, Lto, OutputType, Passes, Sanitizer, SwitchWithOptPath};
19 use rustc::session::Session;
20 use rustc::ty::TyCtxt;
21 use rustc_codegen_ssa::back::write::{run_assembler, CodegenContext, ModuleConfig};
22 use rustc_codegen_ssa::traits::*;
23 use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, RLIB_BYTECODE_EXTENSION};
24 use rustc_data_structures::small_c_str::SmallCStr;
25 use rustc_errors::{FatalError, Handler};
26 use rustc_fs_util::{link_or_copy, path_to_c_string};
27 use rustc_hir::def_id::LOCAL_CRATE;
28
29 use libc::{c_char, c_int, c_uint, c_void, size_t};
30 use std::ffi::CString;
31 use std::fs;
32 use std::io::{self, Write};
33 use std::path::{Path, PathBuf};
34 use std::slice;
35 use std::str;
36 use std::sync::Arc;
37
38 pub const RELOC_MODEL_ARGS: [(&str, llvm::RelocMode); 7] = [
39     ("pic", llvm::RelocMode::PIC),
40     ("static", llvm::RelocMode::Static),
41     ("default", llvm::RelocMode::Default),
42     ("dynamic-no-pic", llvm::RelocMode::DynamicNoPic),
43     ("ropi", llvm::RelocMode::ROPI),
44     ("rwpi", llvm::RelocMode::RWPI),
45     ("ropi-rwpi", llvm::RelocMode::ROPI_RWPI),
46 ];
47
48 pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[
49     ("small", llvm::CodeModel::Small),
50     ("kernel", llvm::CodeModel::Kernel),
51     ("medium", llvm::CodeModel::Medium),
52     ("large", llvm::CodeModel::Large),
53 ];
54
55 pub const TLS_MODEL_ARGS: [(&str, llvm::ThreadLocalMode); 4] = [
56     ("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic),
57     ("local-dynamic", llvm::ThreadLocalMode::LocalDynamic),
58     ("initial-exec", llvm::ThreadLocalMode::InitialExec),
59     ("local-exec", llvm::ThreadLocalMode::LocalExec),
60 ];
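// The string keys in the three tables above are what the user-facing options
// (`-C relocation-model`, `-C code-model` and, presumably, `-Z tls-model`) are
// matched against. A minimal lookup sketch, mirroring how `CODE_GEN_MODEL_ARGS`
// is consumed further down in `target_machine_factory` (illustrative only):
//
//     let model = RELOC_MODEL_ARGS
//         .iter()
//         .find(|(name, _)| *name == "pic")
//         .map(|(_, model)| *model);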
61
62 pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
63     match llvm::last_error() {
64         Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
65         None => handler.fatal(&msg),
66     }
67 }
68
69 pub fn write_output_file(
70     handler: &rustc_errors::Handler,
71     target: &'ll llvm::TargetMachine,
72     pm: &llvm::PassManager<'ll>,
73     m: &'ll llvm::Module,
74     output: &Path,
75     file_type: llvm::FileType,
76 ) -> Result<(), FatalError> {
77     unsafe {
78         let output_c = path_to_c_string(output);
79         let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
80         result.into_result().map_err(|()| {
81             let msg = format!("could not write output to {}", output.display());
82             llvm_err(handler, &msg)
83         })
84     }
85 }
86
87 pub fn create_informational_target_machine(
88     sess: &Session,
89     find_features: bool,
90 ) -> &'static mut llvm::TargetMachine {
91     target_machine_factory(sess, config::OptLevel::No, find_features)()
92         .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
93 }
94
95 pub fn create_target_machine(
96     tcx: TyCtxt<'_>,
97     find_features: bool,
98 ) -> &'static mut llvm::TargetMachine {
99     target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
100         .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
101 }
102
103 pub fn to_llvm_opt_settings(
104     cfg: config::OptLevel,
105 ) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
106     use self::config::OptLevel::*;
107     match cfg {
108         No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
109         Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
110         Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
111         Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
112         Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
113         SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
114     }
115 }
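// For example, `-C opt-level=s` (`config::OptLevel::Size`) maps to
// `(CodeGenOptLevel::Default, CodeGenOptSizeDefault)`: size-optimized builds
// still run the default codegen optimization level, and only the size level
// differs (`-C opt-level=z` bumps it to `CodeGenOptSizeAggressive`).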
116
117 fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
118     use config::OptLevel::*;
119     match cfg {
120         No => llvm::PassBuilderOptLevel::O0,
121         Less => llvm::PassBuilderOptLevel::O1,
122         Default => llvm::PassBuilderOptLevel::O2,
123         Aggressive => llvm::PassBuilderOptLevel::O3,
124         Size => llvm::PassBuilderOptLevel::Os,
125         SizeMin => llvm::PassBuilderOptLevel::Oz,
126     }
127 }
128
129 // If `find_features` is true, this won't access `sess.crate_types`: it assumes
130 // that `is_pie_binary` is false instead. While we are discovering the LLVM target
131 // features, `sess.crate_types` is not yet initialized, so we cannot access it.
132 pub fn target_machine_factory(
133     sess: &Session,
134     optlvl: config::OptLevel,
135     find_features: bool,
136 ) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
137     let reloc_model = get_reloc_model(sess);
138
139     let (opt_level, _) = to_llvm_opt_settings(optlvl);
140     let use_softfp = sess.opts.cg.soft_float;
141
142     let ffunction_sections = sess.target.target.options.function_sections;
143     let fdata_sections = ffunction_sections;
144
145     let code_model_arg =
146         sess.opts.cg.code_model.as_ref().or(sess.target.target.options.code_model.as_ref());
147
148     let code_model = match code_model_arg {
149         Some(s) => match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
150             Some(x) => x.1,
151             _ => {
152                 sess.err(&format!("{:?} is not a valid code model", code_model_arg));
153                 sess.abort_if_errors();
154                 bug!();
155             }
156         },
157         None => llvm::CodeModel::None,
158     };
159
160     let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
161     let mut singlethread = sess.target.target.options.singlethread;
162
163     // On the wasm target, enabling the `atomics` feature means we are no longer
164     // single-threaded; in that case we don't want LLVM to lower atomic operations
165     // to their single-threaded equivalents.
166     if singlethread
167         && sess.target.target.llvm_target.contains("wasm32")
168         && features.iter().any(|s| *s == "+atomics")
169     {
170         singlethread = false;
171     }
172
173     let triple = SmallCStr::new(&sess.target.target.llvm_target);
174     let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
175     let features = features.join(",");
176     let features = CString::new(features).unwrap();
177     let abi = SmallCStr::new(&sess.target.target.options.llvm_abiname);
178     let is_pie_binary = !find_features && is_pie_binary(sess);
179     let trap_unreachable = sess.target.target.options.trap_unreachable;
180     let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
181
182     let asm_comments = sess.asm_comments();
183     let relax_elf_relocations = sess.target.target.options.relax_elf_relocations;
184     Arc::new(move || {
185         let tm = unsafe {
186             llvm::LLVMRustCreateTargetMachine(
187                 triple.as_ptr(),
188                 cpu.as_ptr(),
189                 features.as_ptr(),
190                 abi.as_ptr(),
191                 code_model,
192                 reloc_model,
193                 opt_level,
194                 use_softfp,
195                 is_pie_binary,
196                 ffunction_sections,
197                 fdata_sections,
198                 trap_unreachable,
199                 singlethread,
200                 asm_comments,
201                 emit_stack_size_section,
202                 relax_elf_relocations,
203             )
204         };
205
206         tm.ok_or_else(|| {
207             format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
208         })
209     })
210 }
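// A minimal usage sketch (see `create_target_machine` above for a real call
// site): the factory itself is `Send + Sync`, presumably so it can be handed to
// codegen worker threads, while each call produces a fresh `TargetMachine`:
//
//     let factory = target_machine_factory(sess, config::OptLevel::Default, false);
//     let tm = factory().unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise());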
211
212 pub(crate) fn save_temp_bitcode(
213     cgcx: &CodegenContext<LlvmCodegenBackend>,
214     module: &ModuleCodegen<ModuleLlvm>,
215     name: &str,
216 ) {
217     if !cgcx.save_temps {
218         return;
219     }
220     unsafe {
221         let ext = format!("{}.bc", name);
222         let cgu = Some(&module.name[..]);
223         let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
224         let cstr = path_to_c_string(&path);
225         let llmod = module.module_llvm.llmod();
226         llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
227     }
228 }
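// Illustrative call (the `name` below is hypothetical; the real call sites live
// in the LTO code paths). This is a no-op unless temporaries are being kept
// (`cgcx.save_temps`, which presumably corresponds to `-C save-temps`); with it
// enabled, a `<module>.<name>.bc`-style temp file is written via `temp_path_ext`:
//
//     save_temp_bitcode(&cgcx, &module, "after-opt");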
229
230 pub struct DiagnosticHandlers<'a> {
231     data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
232     llcx: &'a llvm::Context,
233 }
234
235 impl<'a> DiagnosticHandlers<'a> {
236     pub fn new(
237         cgcx: &'a CodegenContext<LlvmCodegenBackend>,
238         handler: &'a Handler,
239         llcx: &'a llvm::Context,
240     ) -> Self {
241         let data = Box::into_raw(Box::new((cgcx, handler)));
242         unsafe {
243             llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
244             llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
245         }
246         DiagnosticHandlers { data, llcx }
247     }
248 }
249
250 impl<'a> Drop for DiagnosticHandlers<'a> {
251     fn drop(&mut self) {
252         use std::ptr::null_mut;
253         unsafe {
254             llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
255             llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
256             drop(Box::from_raw(self.data));
257         }
258     }
259 }
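// `DiagnosticHandlers` is effectively an RAII guard: constructing it installs
// the inline-asm and general diagnostic callbacks on the given `llvm::Context`,
// and dropping it unregisters them and frees the boxed `(cgcx, handler)` pair.
// `optimize` and `codegen` below keep such a guard alive for the duration of
// their LLVM calls, e.g.:
//
//     let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);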
260
261 unsafe extern "C" fn report_inline_asm(
262     cgcx: &CodegenContext<LlvmCodegenBackend>,
263     msg: &str,
264     cookie: c_uint,
265 ) {
266     cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
267 }
268
269 unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
270     if user.is_null() {
271         return;
272     }
273     let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
274
275     let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
276         .expect("non-UTF8 SMDiagnostic");
277
278     report_inline_asm(cgcx, &msg, cookie);
279 }
280
281 unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
282     if user.is_null() {
283         return;
284     }
285     let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
286
287     match llvm::diagnostic::Diagnostic::unpack(info) {
288         llvm::diagnostic::InlineAsm(inline) => {
289             report_inline_asm(cgcx, &llvm::twine_to_string(inline.message), inline.cookie);
290         }
291
292         llvm::diagnostic::Optimization(opt) => {
293             let enabled = match cgcx.remark {
294                 Passes::All => true,
295                 Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
296             };
297
298             if enabled {
299                 diag_handler.note_without_error(&format!(
300                     "optimization {} for {} at {}:{}:{}: {}",
301                     opt.kind.describe(),
302                     opt.pass_name,
303                     opt.filename,
304                     opt.line,
305                     opt.column,
306                     opt.message
307                 ));
308             }
309         }
310         llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
311             let msg = llvm::build_string(|s| {
312                 llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
313             })
314             .expect("non-UTF8 diagnostic");
315             diag_handler.warn(&msg);
316         }
317         llvm::diagnostic::UnknownDiagnostic(..) => {}
318     }
319 }
320
321 fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
322     match config.pgo_gen {
323         SwitchWithOptPath::Enabled(ref opt_dir_path) => {
324             let path = if let Some(dir_path) = opt_dir_path {
325                 dir_path.join("default_%m.profraw")
326             } else {
327                 PathBuf::from("default_%m.profraw")
328             };
329
330             Some(CString::new(format!("{}", path.display())).unwrap())
331         }
332         SwitchWithOptPath::Disabled => None,
333     }
334 }
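// For example, `-C profile-generate=/tmp/pgo` yields `/tmp/pgo/default_%m.profraw`
// here, while a bare `-C profile-generate` yields `default_%m.profraw` relative to
// the current directory; the `%m` pattern is expanded later by the profiling
// runtime, not by rustc.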
335
336 fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
337     config
338         .pgo_use
339         .as_ref()
340         .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
341 }
342
343 pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool {
344     // We only support the new pass manager starting with LLVM 9.
345     if llvm_util::get_major_version() < 9 {
346         return false;
347     }
348
349     // The new pass manager is disabled by default.
350     config.new_llvm_pass_manager.unwrap_or(false)
351 }
352
353 pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
354     cgcx: &CodegenContext<LlvmCodegenBackend>,
355     module: &ModuleCodegen<ModuleLlvm>,
356     config: &ModuleConfig,
357     opt_level: config::OptLevel,
358     opt_stage: llvm::OptStage,
359 ) {
360     let unroll_loops =
361         opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
362     let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
363     let pgo_gen_path = get_pgo_gen_path(config);
364     let pgo_use_path = get_pgo_use_path(config);
365     let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
366     // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
367     let sanitizer_options = if !is_lto {
368         config.sanitizer.as_ref().map(|s| llvm::SanitizerOptions {
369             sanitize_memory: *s == Sanitizer::Memory,
370             sanitize_thread: *s == Sanitizer::Thread,
371             sanitize_address: *s == Sanitizer::Address,
372             sanitize_recover: config.sanitizer_recover.contains(s),
373             sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
374         })
375     } else {
376         None
377     };
378
379     let llvm_selfprofiler = if cgcx.prof.llvm_recording_enabled() {
380         let mut llvm_profiler = LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap());
381         &mut llvm_profiler as *mut _ as *mut c_void
382     } else {
383         std::ptr::null_mut()
384     };
385
386     // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
387     // We would have to add upstream support for this first, before we can support
388     // config.inline_threshold and our more aggressive default thresholds.
389     // FIXME: NewPM uses a different and more explicit way to textually represent
390     // pass pipelines. It would probably make sense to expose this, but it would
391     // require a different format than the current -C passes.
392     llvm::LLVMRustOptimizeWithNewPassManager(
393         module.module_llvm.llmod(),
394         &*module.module_llvm.tm,
395         to_pass_builder_opt_level(opt_level),
396         opt_stage,
397         config.no_prepopulate_passes,
398         config.verify_llvm_ir,
399         using_thin_buffers,
400         config.merge_functions,
401         unroll_loops,
402         config.vectorize_slp,
403         config.vectorize_loop,
404         config.no_builtins,
405         sanitizer_options.as_ref(),
406         pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
407         pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
408         llvm_selfprofiler,
409         selfprofile_before_pass_callback,
410         selfprofile_after_pass_callback,
411     );
412 }
413
414 // Unsafe due to LLVM calls.
415 pub(crate) unsafe fn optimize(
416     cgcx: &CodegenContext<LlvmCodegenBackend>,
417     diag_handler: &Handler,
418     module: &ModuleCodegen<ModuleLlvm>,
419     config: &ModuleConfig,
420 ) -> Result<(), FatalError> {
421     let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &module.name[..]);
422
423     let llmod = module.module_llvm.llmod();
424     let llcx = &*module.module_llvm.llcx;
425     let tm = &*module.module_llvm.tm;
426     let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
427
428     let module_name = module.name.clone();
429     let module_name = Some(&module_name[..]);
430
431     if config.emit_no_opt_bc {
432         let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
433         let out = path_to_c_string(&out);
434         llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
435     }
436
437     if let Some(opt_level) = config.opt_level {
438         if should_use_new_llvm_pass_manager(config) {
439             let opt_stage = match cgcx.lto {
440                 Lto::Fat => llvm::OptStage::PreLinkFatLTO,
441                 Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
442                 _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
443                 _ => llvm::OptStage::PreLinkNoLTO,
444             };
445             optimize_with_new_llvm_pass_manager(cgcx, module, config, opt_level, opt_stage);
446             return Ok(());
447         }
448
449         if cgcx.prof.llvm_recording_enabled() {
450             diag_handler
451                 .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
452         }
453
454         // Create the two optimizing pass managers. These mirror what clang
455         // does, and are populated by LLVM's default PassManagerBuilder.
456         // Each manager has a different set of passes, but they also share
457         // some common passes.
458         let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
459         let mpm = llvm::LLVMCreatePassManager();
460
461         {
462             let find_pass = |pass_name: &str| {
463                 let pass_name = SmallCStr::new(pass_name);
464                 llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
465             };
466
467             if config.verify_llvm_ir {
468                 // Verification should run as the very first pass.
469                 llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
470             }
471
472             let mut extra_passes = Vec::new();
473             let mut have_name_anon_globals_pass = false;
474
475             for pass_name in &config.passes {
476                 if pass_name == "lint" {
477                     // Linting should also be performed early, directly on the generated IR.
478                     llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
479                     continue;
480                 }
481
482                 if let Some(pass) = find_pass(pass_name) {
483                     extra_passes.push(pass);
484                 } else {
485                     diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
486                 }
487
488                 if pass_name == "name-anon-globals" {
489                     have_name_anon_globals_pass = true;
490                 }
491             }
492
493             add_sanitizer_passes(config, &mut extra_passes);
494
495             // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
496             // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
497             // we'll get errors in LLVM.
498             let using_thin_buffers = config.bitcode_needed();
499             if !config.no_prepopulate_passes {
500                 llvm::LLVMAddAnalysisPasses(tm, fpm);
501                 llvm::LLVMAddAnalysisPasses(tm, mpm);
502                 let opt_level = to_llvm_opt_settings(opt_level).0;
503                 let prepare_for_thin_lto = cgcx.lto == Lto::Thin
504                     || cgcx.lto == Lto::ThinLocal
505                     || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
506                 with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
507                     llvm::LLVMRustAddLastExtensionPasses(
508                         b,
509                         extra_passes.as_ptr(),
510                         extra_passes.len() as size_t,
511                     );
512                     llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
513                     llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
514                 });
515
516                 have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
517                 if using_thin_buffers && !prepare_for_thin_lto {
518                     llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
519                     have_name_anon_globals_pass = true;
520                 }
521             } else {
522                 // If we don't use the standard pipeline, directly populate the MPM
523                 // with the extra passes.
524                 for pass in extra_passes {
525                     llvm::LLVMRustAddPass(mpm, pass);
526                 }
527             }
528
529             if using_thin_buffers && !have_name_anon_globals_pass {
530                 // As described above, this will probably cause an error in LLVM
531                 if config.no_prepopulate_passes {
532                     diag_handler.err(
533                         "The current compilation is going to use thin LTO buffers \
534                                       without running LLVM's NameAnonGlobals pass. \
535                                       This will likely cause errors in LLVM. Consider adding \
536                                       -C passes=name-anon-globals to the compiler command line.",
537                     );
538                 } else {
539                     bug!(
540                         "We are using thin LTO buffers without running the NameAnonGlobals pass. \
541                           This will likely cause errors in LLVM and should never happen."
542                     );
543                 }
544             }
545         }
546
547         diag_handler.abort_if_errors();
548
549         // Finally, run the actual optimization passes
550         {
551             let _timer = cgcx.prof.extra_verbose_generic_activity(
552                 "LLVM_module_optimize_function_passes",
553                 &module.name[..],
554             );
555             llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
556         }
557         {
558             let _timer = cgcx.prof.extra_verbose_generic_activity(
559                 "LLVM_module_optimize_module_passes",
560                 &module.name[..],
561             );
562             llvm::LLVMRunPassManager(mpm, llmod);
563         }
564
565         // Deallocate managers that we're now done with
566         llvm::LLVMDisposePassManager(fpm);
567         llvm::LLVMDisposePassManager(mpm);
568     }
569     Ok(())
570 }
571
572 unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
573     let sanitizer = match &config.sanitizer {
574         None => return,
575         Some(s) => s,
576     };
577
578     let recover = config.sanitizer_recover.contains(sanitizer);
579     match sanitizer {
580         Sanitizer::Address => {
581             passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
582             passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
583         }
584         Sanitizer::Memory => {
585             let track_origins = config.sanitizer_memory_track_origins as c_int;
586             passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
587         }
588         Sanitizer::Thread => {
589             passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
590         }
591         Sanitizer::Leak => {}
592     }
593 }
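// For example, building with `-Z sanitizer=address -Z sanitizer-recover=address`
// (the flag spellings here are assumptions) pushes both the function-level and
// the module-level AddressSanitizer passes in recover mode, while
// `Sanitizer::Leak` deliberately adds no LLVM passes at all.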
594
595 pub(crate) unsafe fn codegen(
596     cgcx: &CodegenContext<LlvmCodegenBackend>,
597     diag_handler: &Handler,
598     module: ModuleCodegen<ModuleLlvm>,
599     config: &ModuleConfig,
600 ) -> Result<CompiledModule, FatalError> {
601     let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
602     {
603         let llmod = module.module_llvm.llmod();
604         let llcx = &*module.module_llvm.llcx;
605         let tm = &*module.module_llvm.tm;
606         let module_name = module.name.clone();
607         let module_name = Some(&module_name[..]);
608         let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
609
610         if cgcx.msvc_imps_needed {
611             create_msvc_imps(cgcx, llcx, llmod);
612         }
613
614         // A codegen-specific pass manager is used to generate object
615         // files for an LLVM module.
616         //
617         // Apparently each of these pass managers is a one-shot kind of
618         // thing, so we create a new one for each type of output. The
619         // pass manager passed to the closure must not be allowed to escape
620         // the closure itself, and each manager should only be
621         // used once.
622         unsafe fn with_codegen<'ll, F, R>(
623             tm: &'ll llvm::TargetMachine,
624             llmod: &'ll llvm::Module,
625             no_builtins: bool,
626             f: F,
627         ) -> R
628         where
629             F: FnOnce(&'ll mut PassManager<'ll>) -> R,
630         {
631             let cpm = llvm::LLVMCreatePassManager();
632             llvm::LLVMAddAnalysisPasses(tm, cpm);
633             llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
634             f(cpm)
635         }
636
637         // If we don't have the integrated assembler, then we need to emit asm
638         // from LLVM and use `gcc` to create the object file.
639         let asm_to_obj = config.emit_obj && config.no_integrated_as;
640
641         // Change what we write, and what we clean up, based on whether the object
642         // files are just LLVM bitcode. In that case we write the bitcode, delete it
643         // afterwards if it wasn't explicitly requested, and don't generate machine
644         // code at all: the .o file is produced by copying the .bc.
645         let write_bc = config.emit_bc || config.obj_is_bitcode;
646         let rm_bc = !config.emit_bc && config.obj_is_bitcode;
647         let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
648         let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
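        // For instance, when the "object" files handed to the linker are really
        // bitcode (e.g. `-C linker-plugin-lto`-style setups, where `obj_is_bitcode`
        // is set), `write_bc` and `copy_bc_to_obj` end up true, machine-code emission
        // is skipped, and the `.bc` output is later linked or copied over the `.o` path.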
649
650         let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
651         let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
652
653         if write_bc || config.emit_bc_compressed || config.embed_bitcode {
654             let _timer = cgcx
655                 .prof
656                 .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
657             let thin = ThinBuffer::new(llmod);
658             let data = thin.data();
659
660             if write_bc {
661                 let _timer = cgcx.prof.generic_activity_with_arg(
662                     "LLVM_module_codegen_emit_bitcode",
663                     &module.name[..],
664                 );
665                 if let Err(e) = fs::write(&bc_out, data) {
666                     let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
667                     diag_handler.err(&msg);
668                 }
669             }
670
671             if config.embed_bitcode {
672                 let _timer = cgcx.prof.generic_activity_with_arg(
673                     "LLVM_module_codegen_embed_bitcode",
674                     &module.name[..],
675                 );
676                 embed_bitcode(cgcx, llcx, llmod, Some(data));
677             }
678
679             if config.emit_bc_compressed {
680                 let _timer = cgcx.prof.generic_activity_with_arg(
681                     "LLVM_module_codegen_emit_compressed_bitcode",
682                     &module.name[..],
683                 );
684                 let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
685                 let data = bytecode::encode(&module.name, data);
686                 if let Err(e) = fs::write(&dst, data) {
687                     let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
688                     diag_handler.err(&msg);
689                 }
690             }
691         } else if config.embed_bitcode_marker {
692             embed_bitcode(cgcx, llcx, llmod, None);
693         }
694
695         {
696             if config.emit_ir {
697                 let _timer = cgcx
698                     .prof
699                     .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
700                 let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
701                 let out_c = path_to_c_string(&out);
702
703                 extern "C" fn demangle_callback(
704                     input_ptr: *const c_char,
705                     input_len: size_t,
706                     output_ptr: *mut c_char,
707                     output_len: size_t,
708                 ) -> size_t {
709                     let input = unsafe {
710                         slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
711                     };
712
713                     let input = match str::from_utf8(input) {
714                         Ok(s) => s,
715                         Err(_) => return 0,
716                     };
717
718                     let output = unsafe {
719                         slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
720                     };
721                     let mut cursor = io::Cursor::new(output);
722
723                     let demangled = match rustc_demangle::try_demangle(input) {
724                         Ok(d) => d,
725                         Err(_) => return 0,
726                     };
727
728                     if let Err(_) = write!(cursor, "{:#}", demangled) {
729                         // Possible only if the provided buffer is not big enough.
730                         return 0;
731                     }
732
733                     cursor.position() as size_t
734                 }
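                // `demangle_callback` is handed each mangled symbol name LLVM is about
                // to print; when `rustc_demangle` can handle it, the demangled form is
                // written into the fixed-size buffer LLVM provides and the number of
                // bytes written is returned. Returning 0 signals "leave the name
                // as-is" (at least, that is how the error paths above treat it).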
735
736                 let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
737                 result.into_result().map_err(|()| {
738                     let msg = format!("failed to write LLVM IR to {}", out.display());
739                     llvm_err(diag_handler, &msg)
740                 })?;
741             }
742
743             if config.emit_asm || asm_to_obj {
744                 let _timer = cgcx
745                     .prof
746                     .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
747                 let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
748
749                 // We can't use the same module for asm and binary output, because that triggers
750                 // various errors like invalid IR or broken binaries, so we might have to clone the
751                 // module to produce the asm output.
752                 let llmod = if config.emit_obj { llvm::LLVMCloneModule(llmod) } else { llmod };
753                 with_codegen(tm, llmod, config.no_builtins, |cpm| {
754                     write_output_file(
755                         diag_handler,
756                         tm,
757                         cpm,
758                         llmod,
759                         &path,
760                         llvm::FileType::AssemblyFile,
761                     )
762                 })?;
763             }
764
765             if write_obj {
766                 let _timer = cgcx
767                     .prof
768                     .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
769                 with_codegen(tm, llmod, config.no_builtins, |cpm| {
770                     write_output_file(
771                         diag_handler,
772                         tm,
773                         cpm,
774                         llmod,
775                         &obj_out,
776                         llvm::FileType::ObjectFile,
777                     )
778                 })?;
779             } else if asm_to_obj {
780                 let _timer = cgcx
781                     .prof
782                     .generic_activity_with_arg("LLVM_module_codegen_asm_to_obj", &module.name[..]);
783                 let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
784                 run_assembler(cgcx, diag_handler, &assembly, &obj_out);
785
786                 if !config.emit_asm && !cgcx.save_temps {
787                     drop(fs::remove_file(&assembly));
788                 }
789             }
790         }
791
792         if copy_bc_to_obj {
793             debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
794             if let Err(e) = link_or_copy(&bc_out, &obj_out) {
795                 diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
796             }
797         }
798
799         if rm_bc {
800             debug!("removing_bitcode {:?}", bc_out);
801             if let Err(e) = fs::remove_file(&bc_out) {
802                 diag_handler.err(&format!("failed to remove bitcode: {}", e));
803             }
804         }
805
806         drop(handlers);
807     }
808     Ok(module.into_compiled_module(
809         config.emit_obj,
810         config.emit_bc,
811         config.emit_bc_compressed,
812         &cgcx.output_filenames,
813     ))
814 }
815
816 /// Embed the bitcode of an LLVM module in the LLVM module itself.
817 ///
818 /// This is done primarily for iOS where it appears to be standard to compile C
819 /// code at least with `-fembed-bitcode` which creates two sections in the
820 /// executable:
821 ///
822 /// * __LLVM,__bitcode
823 /// * __LLVM,__cmdline
824 ///
825 /// It appears *both* of these sections are necessary to get the linker to
826 /// recognize what's going on. For our purposes, though, we just always emit an
827 /// empty cmdline section.
828 ///
829 /// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
830 /// embed an empty section.
831 ///
832 /// Basically all of this is us attempting to follow in the footsteps of clang
833 /// on iOS. See #35968 for lots more info.
834 unsafe fn embed_bitcode(
835     cgcx: &CodegenContext<LlvmCodegenBackend>,
836     llcx: &llvm::Context,
837     llmod: &llvm::Module,
838     bitcode: Option<&[u8]>,
839 ) {
840     let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
841     let llglobal = llvm::LLVMAddGlobal(
842         llmod,
843         common::val_ty(llconst),
844         "rustc.embedded.module\0".as_ptr().cast(),
845     );
846     llvm::LLVMSetInitializer(llglobal, llconst);
847
848     let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
849         || cgcx.opts.target_triple.triple().contains("-darwin");
850
851     let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
852     llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
853     llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
854     llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
855
856     let llconst = common::bytes_in_context(llcx, &[]);
857     let llglobal = llvm::LLVMAddGlobal(
858         llmod,
859         common::val_ty(llconst),
860         "rustc.embedded.cmdline\0".as_ptr().cast(),
861     );
862     llvm::LLVMSetInitializer(llglobal, llconst);
863     let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
864     llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
865     llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
866 }
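// Roughly speaking, the net effect is two private constant globals in the
// module; on Apple targets the textual IR would look something like the
// following (illustrative only, the exact form depends on the LLVM version):
//
//     @rustc.embedded.module = private constant [N x i8] c"...", section "__LLVM,__bitcode"
//     @rustc.embedded.cmdline = private constant [0 x i8] zeroinitializer, section "__LLVM,__cmdline"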
867
868 pub unsafe fn with_llvm_pmb(
869     llmod: &llvm::Module,
870     config: &ModuleConfig,
871     opt_level: llvm::CodeGenOptLevel,
872     prepare_for_thin_lto: bool,
873     f: &mut dyn FnMut(&llvm::PassManagerBuilder),
874 ) {
875     use std::ptr;
876
877     // Create the PassManagerBuilder for LLVM. We configure it with
878     // reasonable defaults and prepare it to actually populate the pass
879     // manager.
880     let builder = llvm::LLVMPassManagerBuilderCreate();
881     let opt_size =
882         config.opt_size.map(|x| to_llvm_opt_settings(x).1).unwrap_or(llvm::CodeGenOptSizeNone);
883     let inline_threshold = config.inline_threshold;
884     let pgo_gen_path = get_pgo_gen_path(config);
885     let pgo_use_path = get_pgo_use_path(config);
886
887     llvm::LLVMRustConfigurePassManagerBuilder(
888         builder,
889         opt_level,
890         config.merge_functions,
891         config.vectorize_slp,
892         config.vectorize_loop,
893         prepare_for_thin_lto,
894         pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
895         pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
896     );
897
898     llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
899
900     if opt_size != llvm::CodeGenOptSizeNone {
901         llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
902     }
903
904     llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
905
906     // Here we match what clang does (kinda). For O0 we only inline
907     // always-inline functions (but don't add lifetime intrinsics), at O1 we
908     // inline with lifetime intrinsics, and at O2+ we add an inliner with
909     // thresholds copied from clang.
910     match (opt_level, opt_size, inline_threshold) {
911         (.., Some(t)) => {
912             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
913         }
914         (llvm::CodeGenOptLevel::Aggressive, ..) => {
915             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
916         }
917         (_, llvm::CodeGenOptSizeDefault, _) => {
918             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
919         }
920         (_, llvm::CodeGenOptSizeAggressive, _) => {
921             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
922         }
923         (llvm::CodeGenOptLevel::None, ..) => {
924             llvm::LLVMRustAddAlwaysInlinePass(builder, false);
925         }
926         (llvm::CodeGenOptLevel::Less, ..) => {
927             llvm::LLVMRustAddAlwaysInlinePass(builder, true);
928         }
929         (llvm::CodeGenOptLevel::Default, ..) => {
930             llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
931         }
932         (llvm::CodeGenOptLevel::Other, ..) => bug!("CodeGenOptLevel::Other selected"),
933     }
934
935     f(builder);
936     llvm::LLVMPassManagerBuilderDispose(builder);
937 }
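// To summarize the inliner selection above: an explicit `-C inline-threshold`
// always wins; otherwise O3 uses a threshold of 275, Os 75, Oz 25, and O2 225,
// while O0/O1 only run the always-inline pass (without and with lifetime
// intrinsics respectively).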
938
939 // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
940 // This is required to satisfy `dllimport` references to static data in .rlibs
941 // when using the MSVC linker. We do this only for data, as the linker can fix
942 // up code references on its own.
943 // See #26591 and #27438.
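//
// For example, a public static `FOO` defined in an rlib gets a companion
// `__imp_FOO` (or `__imp__FOO` on 32-bit x86) pointer global pointing at it,
// which is what `dllimport`-style references from other crates then resolve to.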
944 fn create_msvc_imps(
945     cgcx: &CodegenContext<LlvmCodegenBackend>,
946     llcx: &llvm::Context,
947     llmod: &llvm::Module,
948 ) {
949     if !cgcx.msvc_imps_needed {
950         return;
951     }
952     // The x86 ABI seems to require that leading underscores are added to symbol
953     // names, so we need an extra underscore on x86. There's also a leading
954     // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
955     // underscores added in front).
956     let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
957
958     unsafe {
959         let i8p_ty = Type::i8p_llcx(llcx);
960         let globals = base::iter_globals(llmod)
961             .filter(|&val| {
962                 llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
963                     && llvm::LLVMIsDeclaration(val) == 0
964             })
965             .filter_map(|val| {
966                 // Exclude some symbols that we know are not Rust symbols.
967                 let name = llvm::get_value_name(val);
968                 if ignored(name) { None } else { Some((val, name)) }
969             })
970             .map(move |(val, name)| {
971                 let mut imp_name = prefix.as_bytes().to_vec();
972                 imp_name.extend(name);
973                 let imp_name = CString::new(imp_name).unwrap();
974                 (imp_name, val)
975             })
976             .collect::<Vec<_>>();
977
978         for (imp_name, val) in globals {
979             let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
980             llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
981             llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
982         }
983     }
984
985     // Use this function to exclude certain symbols from `__imp` generation.
986     fn ignored(symbol_name: &[u8]) -> bool {
987         // These are symbols generated by LLVM's profiling instrumentation
988         symbol_name.starts_with(b"__llvm_profile_")
989     }
990 }