8 #include "llvm/Analysis/TargetLibraryInfo.h"
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/CodeGen/TargetSubtargetInfo.h"
11 #include "llvm/InitializePasses.h"
12 #include "llvm/IR/AutoUpgrade.h"
13 #include "llvm/IR/AssemblyAnnotationWriter.h"
14 #include "llvm/IR/IntrinsicInst.h"
15 #include "llvm/IR/Verifier.h"
16 #include "llvm/Passes/PassBuilder.h"
17 #if LLVM_VERSION_GE(9, 0)
18 #include "llvm/Passes/StandardInstrumentations.h"
20 #include "llvm/Support/CBindingWrapping.h"
21 #include "llvm/Support/FileSystem.h"
22 #include "llvm/Support/Host.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
25 #include "llvm/Transforms/IPO/AlwaysInliner.h"
26 #include "llvm/Transforms/IPO/FunctionImport.h"
27 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
28 #include "llvm/LTO/LTO.h"
29 #include "llvm-c/Transforms/PassManagerBuilder.h"
31 #include "llvm/Transforms/Instrumentation.h"
32 #if LLVM_VERSION_GE(9, 0)
33 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
34 #include "llvm/Support/TimeProfiler.h"
36 #if LLVM_VERSION_GE(8, 0)
37 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
38 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
40 #if LLVM_VERSION_GE(9, 0)
41 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
43 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
// Opaque C-API handle types for LLVM `Pass` and `TargetMachine` objects,
// plus wrap()/unwrap() conversion helpers generated by LLVM's
// DEFINE_STDCXX_CONVERSION_FUNCTIONS macro (declared in CBindingWrapping.h).
47 typedef struct LLVMOpaquePass *LLVMPassRef;
48 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
50 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
51 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
52 DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBuilder,
53 LLVMPassManagerBuilderRef)
// Registers every legacy-PM pass family (core, codegen, scalar opts, IPO,
// instrumentation, ...) with the global PassRegistry so individual passes
// can later be looked up by name (see LLVMRustFindAndCreatePass below).
55 extern "C" void LLVMInitializePasses() {
56 PassRegistry &Registry = *PassRegistry::getPassRegistry();
57 initializeCore(Registry);
58 initializeCodeGen(Registry);
59 initializeScalarOpts(Registry);
60 initializeVectorization(Registry);
61 initializeIPO(Registry);
62 initializeAnalysis(Registry);
63 initializeTransformUtils(Registry);
64 initializeInstCombine(Registry);
65 initializeInstrumentation(Registry);
66 initializeTarget(Registry);
// Starts LLVM's -ftime-trace style profiler. A no-op on LLVM < 9, where the
// TimeProfiler API does not exist.
69 extern "C" void LLVMTimeTraceProfilerInitialize() {
70 #if LLVM_VERSION_GE(9, 0)
71 timeTraceProfilerInitialize();
// Writes the collected time-trace events to `FileName` (overwriting any
// existing file via CD_CreateAlways) and tears the profiler down.
75 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
76 #if LLVM_VERSION_GE(9, 0)
77 StringRef FN(FileName);
79 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
81 timeTraceProfilerWrite(OS);
82 timeTraceProfilerCleanup();
// Rust-facing mirror of llvm::PassKind (enumerator list elided in this view).
86 enum class LLVMRustPassKind {
// Maps llvm::PassKind onto the Rust-facing enum; anything that is neither a
// function nor a module pass is reported as `Other`.
92 static LLVMRustPassKind toRust(PassKind Kind) {
95 return LLVMRustPassKind::Function;
97 return LLVMRustPassKind::Module;
99 return LLVMRustPassKind::Other;
// Looks up a legacy pass by its registered short name and instantiates it.
// Relies on LLVMInitializePasses() having populated the registry first.
103 extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
104 StringRef SR(PassName);
105 PassRegistry *PR = PassRegistry::getPassRegistry();
107 const PassInfo *PI = PR->getPassInfo(SR);
109 return wrap(PI->createPass());
// Creates the ASan function-level legacy pass. Kernel ASan is never used by
// rustc, and stack use-after-scope checking is always enabled.
114 extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
115 const bool CompileKernel = false;
116 const bool UseAfterScope = true;
118 return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
// Creates the module-level ASan legacy pass; the factory was renamed in LLVM 9.
121 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
122 const bool CompileKernel = false;
124 #if LLVM_VERSION_GE(9, 0)
125 return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
127 return wrap(createAddressSanitizerModulePass(CompileKernel, Recover));
// Creates the MSan legacy pass. LLVM 9 takes a MemorySanitizerOptions struct,
// LLVM 8 takes the flags positionally, and older LLVMs use a different factory.
131 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
132 #if LLVM_VERSION_GE(9, 0)
133 const bool CompileKernel = false;
135 return wrap(createMemorySanitizerLegacyPassPass(
136 MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
137 #elif LLVM_VERSION_GE(8, 0)
138 return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover));
140 return wrap(createMemorySanitizerPass(TrackOrigins, Recover));
// Creates the TSan instrumentation pass (renamed "Legacy" as of LLVM 8).
144 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
145 #if LLVM_VERSION_GE(8, 0)
146 return wrap(createThreadSanitizerLegacyPassPass());
148 return wrap(createThreadSanitizerPass());
// Reports whether a pass is function-, module-, or otherwise-scoped so the
// Rust side can add it to the matching pass manager.
152 extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
154 Pass *Pass = unwrap(RustPass);
155 return toRust(Pass->getPassKind());
// Adds a pass to a legacy pass manager (which takes ownership of it).
158 extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
160 Pass *Pass = unwrap(RustPass);
161 PassManagerBase *PMB = unwrap(PMR);
// The LLVM C API has no binding for populateThinLTOPassManager, so expose one.
166 void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
167 LLVMPassManagerBuilderRef PMBR,
168 LLVMPassManagerRef PMR
170 unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
// Registers `NumPasses` already-created passes to run at the tail of the
// legacy optimization pipeline. They are hooked to both EP_OptimizerLast and
// EP_EnabledOnOptLevel0 so they fire for optimized and -O0 builds alike.
174 void LLVMRustAddLastExtensionPasses(
175 LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
176 auto AddExtensionPasses = [Passes, NumPasses](
177 const PassManagerBuilder &Builder, PassManagerBase &PM) {
178 for (size_t I = 0; I < NumPasses; I++) {
179 PM.add(unwrap(Passes[I]));
182 // Add the passes to both of the pre-finalization extension points,
183 // so they are run for optimized and non-optimized builds.
184 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
186 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
// Per-target SUBTARGET_* helper macros: each expands to SUBTARGET(<Arch>)
// when the corresponding backend is linked into this LLVM build, and to
// nothing otherwise (the empty re-definitions are the #else branches; the
// directives themselves are elided in this view).
190 #ifdef LLVM_COMPONENT_X86
191 #define SUBTARGET_X86 SUBTARGET(X86)
193 #define SUBTARGET_X86
196 #ifdef LLVM_COMPONENT_ARM
197 #define SUBTARGET_ARM SUBTARGET(ARM)
199 #define SUBTARGET_ARM
202 #ifdef LLVM_COMPONENT_AARCH64
203 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
205 #define SUBTARGET_AARCH64
208 #ifdef LLVM_COMPONENT_MIPS
209 #define SUBTARGET_MIPS SUBTARGET(Mips)
211 #define SUBTARGET_MIPS
214 #ifdef LLVM_COMPONENT_POWERPC
215 #define SUBTARGET_PPC SUBTARGET(PPC)
217 #define SUBTARGET_PPC
220 #ifdef LLVM_COMPONENT_SYSTEMZ
221 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
223 #define SUBTARGET_SYSTEMZ
226 #ifdef LLVM_COMPONENT_MSP430
227 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
229 #define SUBTARGET_MSP430
232 #ifdef LLVM_COMPONENT_RISCV
233 #define SUBTARGET_RISCV SUBTARGET(RISCV)
235 #define SUBTARGET_RISCV
238 #ifdef LLVM_COMPONENT_SPARC
239 #define SUBTARGET_SPARC SUBTARGET(Sparc)
241 #define SUBTARGET_SPARC
244 #ifdef LLVM_COMPONENT_HEXAGON
245 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
247 #define SUBTARGET_HEXAGON
// GEN_SUBTARGETS chains the per-arch helpers together, and SUBTARGET(x)
// declares each enabled backend's feature/CPU tables (extern
// SubtargetFeatureKV arrays) so they can be inspected by the code below.
250 #define GEN_SUBTARGETS \
262 #define SUBTARGET(x) \
264 extern const SubtargetFeatureKV x##FeatureKV[]; \
265 extern const SubtargetFeatureKV x##SubTypeKV[]; \
// Returns true if the target machine's subtarget enables `Feature`,
// queried through MCSubtargetInfo::checkFeatures with a "+<feature>" spec.
271 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
272 const char *Feature) {
273 TargetMachine *Target = unwrap(TM);
274 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
275 return MCInfo->checkFeatures(std::string("+") + Feature);
// Rust-facing code model enum (enumerator list elided in this view).
278 enum class LLVMRustCodeModel {
// Converts the Rust-facing code model to LLVM's CodeModel::Model; aborts on
// any value with no LLVM equivalent.
287 static CodeModel::Model fromRust(LLVMRustCodeModel Model) {
289 case LLVMRustCodeModel::Small:
290 return CodeModel::Small;
291 case LLVMRustCodeModel::Kernel:
292 return CodeModel::Kernel;
293 case LLVMRustCodeModel::Medium:
294 return CodeModel::Medium;
295 case LLVMRustCodeModel::Large:
296 return CodeModel::Large;
298 report_fatal_error("Bad CodeModel.");
// Rust-facing codegen optimization level (enumerators elided in this view).
302 enum class LLVMRustCodeGenOptLevel {
// Converts to the CodeGenOpt::Level consumed by TargetMachine creation.
310 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
312 case LLVMRustCodeGenOptLevel::None:
313 return CodeGenOpt::None;
314 case LLVMRustCodeGenOptLevel::Less:
315 return CodeGenOpt::Less;
316 case LLVMRustCodeGenOptLevel::Default:
317 return CodeGenOpt::Default;
318 case LLVMRustCodeGenOptLevel::Aggressive:
319 return CodeGenOpt::Aggressive;
321 report_fatal_error("Bad CodeGenOptLevel.");
// Optimization levels for the new pass manager (O0-O3 plus size levels).
325 enum class LLVMRustPassBuilderOptLevel {
// Converts to PassBuilder::OptimizationLevel for the new-PM pipelines below.
334 static PassBuilder::OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
336 case LLVMRustPassBuilderOptLevel::O0:
337 return PassBuilder::O0;
338 case LLVMRustPassBuilderOptLevel::O1:
339 return PassBuilder::O1;
340 case LLVMRustPassBuilderOptLevel::O2:
341 return PassBuilder::O2;
342 case LLVMRustPassBuilderOptLevel::O3:
343 return PassBuilder::O3;
344 case LLVMRustPassBuilderOptLevel::Os:
345 return PassBuilder::Os;
346 case LLVMRustPassBuilderOptLevel::Oz:
347 return PassBuilder::Oz;
349 report_fatal_error("Bad PassBuilderOptLevel.");
// Rust-facing relocation model enum (enumerators elided in this view).
353 enum class LLVMRustRelocMode {
// Converts to LLVM's Reloc::Model. Returns an Optional; the bodies of the
// Default/PIC/ROPI/RWPI cases are on lines elided from this view —
// presumably Default yields an empty Optional (TODO confirm upstream).
363 static Optional<Reloc::Model> fromRust(LLVMRustRelocMode RustReloc) {
365 case LLVMRustRelocMode::Default:
367 case LLVMRustRelocMode::Static:
368 return Reloc::Static;
369 case LLVMRustRelocMode::PIC:
371 case LLVMRustRelocMode::DynamicNoPic:
372 return Reloc::DynamicNoPIC;
373 case LLVMRustRelocMode::ROPI:
375 case LLVMRustRelocMode::RWPI:
377 case LLVMRustRelocMode::ROPIRWPI:
378 return Reloc::ROPI_RWPI;
380 report_fatal_error("Bad RelocModel.");
384 /// getLongestEntryLength - Return the length of the longest entry in the table.
385 /// Used below to column-align the printed CPU and feature tables.
386 template<typename KV>
387 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
388 for (auto &I : Table)
389 MaxLen = std::max(MaxLen, std::strlen(I.Key));
// Prints the CPU table for `rustc --print target-cpus`. When compiling for
// the host architecture, also advertises the "native" pseudo-CPU along with
// the detected host CPU name.
393 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
394 const TargetMachine *Target = unwrap(TM);
395 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
396 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
397 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
398 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
399 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
401 printf("Available CPUs for this target:\n");
402 if (HostArch == TargetArch) {
403 const StringRef HostCPU = sys::getHostCPUName();
404 printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
405 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
407 for (auto &CPU : CPUTable)
408 printf(" %-*s\n", MaxCPULen, CPU.Key);
// Prints the feature table for `rustc --print target-features`, plus a short
// usage note about +feature/-feature syntax.
412 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef TM) {
413 const TargetMachine *Target = unwrap(TM);
414 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
415 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
416 unsigned MaxFeatLen = getLongestEntryLength(FeatTable);
418 printf("Available features for this target:\n");
419 for (auto &Feature : FeatTable)
420 printf(" %-*s - %s.\n", MaxFeatLen, Feature.Key, Feature.Desc);
423 printf("Use +feature to enable a feature, or -feature to disable it.\n"
424 "For example, rustc -C -target-cpu=mycpu -C "
425 "target-feature=+feature1,-feature2\n\n");
// Fallback stubs for LLVM versions without the CPU/feature table API
// (these sit in the #else branch of a version check elided from this view).
430 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
431 printf("Target CPU help is not supported by this LLVM version.\n\n");
434 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef) {
435 printf("Target features help is not supported by this LLVM version.\n\n");
// Returns the host CPU name detected by LLVM; `len` is an out-parameter —
// presumably set to the name's length before returning its data pointer
// (the remaining statements are elided from this view; confirm upstream).
439 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
440 StringRef Name = sys::getHostCPUName();
// Creates a TargetMachine for the given triple/CPU/feature string, applying
// the rustc-specific knobs (soft-float ABI, section splitting, asm comments,
// ELF relocation relaxing, trap-on-unreachable, stack-size sections, ...).
// Returns null (after recording the error via LLVMRustSetLastError) when the
// triple has no registered backend.
445 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
446 const char *TripleStr, const char *CPU, const char *Feature,
447 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocMode RustReloc,
448 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
449 bool PositionIndependentExecutable, bool FunctionSections,
451 bool TrapUnreachable,
454 bool EmitStackSizeSection,
455 bool RelaxELFRelocations) {
457 auto OptLevel = fromRust(RustOptLevel);
458 auto RM = fromRust(RustReloc);
// Look up the backend registered for this (normalized) triple.
461 Triple Trip(Triple::normalize(TripleStr));
462 const llvm::Target *TheTarget =
463 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
464 if (TheTarget == nullptr) {
465 LLVMRustSetLastError(Error.c_str());
469 TargetOptions Options;
471 Options.FloatABIType = FloatABI::Default;
473 Options.FloatABIType = FloatABI::Soft;
475 Options.DataSections = DataSections;
476 Options.FunctionSections = FunctionSections;
477 Options.MCOptions.AsmVerbose = AsmComments;
478 Options.MCOptions.PreserveAsmComments = AsmComments;
479 Options.MCOptions.ABIName = ABIStr;
480 Options.RelaxELFRelocations = RelaxELFRelocations;
482 if (TrapUnreachable) {
483 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
484 // This limits the extent of possible undefined behavior in some cases, as
485 // it prevents control flow from "falling through" into whatever code
486 // happens to be laid out next in memory.
487 Options.TrapUnreachable = true;
491 Options.ThreadModel = ThreadModel::Single;
494 Options.EmitStackSizeSection = EmitStackSizeSection;
// `None` means "let LLVM pick"; only forward an explicit code model.
496 Optional<CodeModel::Model> CM;
497 if (RustCM != LLVMRustCodeModel::None)
498 CM = fromRust(RustCM);
499 TargetMachine *TM = TheTarget->createTargetMachine(
500 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
// Destroys a TargetMachine created by LLVMRustCreateTargetMachine
// (body elided from this view; presumably `delete unwrap(TM);`).
504 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
// Applies rustc's optimization settings to a legacy PassManagerBuilder:
// opt level, vectorization/merge-functions toggles, ThinLTO pre-link mode,
// and (when the paths are non-null) PGO instrumentation or use.
508 extern "C" void LLVMRustConfigurePassManagerBuilder(
509 LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
510 bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
511 const char* PGOGenPath, const char* PGOUsePath) {
512 unwrap(PMBR)->MergeFunctions = MergeFunctions;
513 unwrap(PMBR)->SLPVectorize = SLPVectorize;
514 unwrap(PMBR)->OptLevel = fromRust(OptLevel);
515 unwrap(PMBR)->LoopVectorize = LoopVectorize;
516 unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
// PGO generation and use branches (guarding conditionals elided in this view).
520 unwrap(PMBR)->EnablePGOInstrGen = true;
521 unwrap(PMBR)->PGOInstrGen = PGOGenPath;
525 unwrap(PMBR)->PGOInstrUse = PGOUsePath;
529 // Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
530 // field of a PassManagerBuilder, we expose our own method of doing so.
// The builder takes ownership of the heap-allocated TargetLibraryInfoImpl.
// `DisableSimplifyLibCalls` turns off all libcall recognition/simplification.
531 extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
533 bool DisableSimplifyLibCalls) {
534 Triple TargetTriple(unwrap(M)->getTargetTriple());
535 TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
536 if (DisableSimplifyLibCalls)
537 TLI->disableAllFunctions();
538 unwrap(PMBR)->LibraryInfo = TLI;
541 // Unfortunately, the LLVM C API doesn't provide a way to create the
542 // TargetLibraryInfo pass, so we use this method to do so.
543 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
544 bool DisableSimplifyLibCalls) {
545 Triple TargetTriple(unwrap(M)->getTargetTriple());
546 TargetLibraryInfoImpl TLII(TargetTriple);
547 if (DisableSimplifyLibCalls)
548 TLII.disableAllFunctions();
549 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
552 // Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
553 // all the functions in a module, so we do that manually here. You'll find
554 // similar code in clang's BackendUtil.cpp file.
555 extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
557 llvm::legacy::FunctionPassManager *P =
558 unwrap<llvm::legacy::FunctionPassManager>(PMR);
559 P->doInitialization();
561 // Upgrade all calls to old intrinsics first.
562 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
563 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
// Then run the function pass pipeline over every function with a body.
565 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
567 if (!I->isDeclaration())
// Forwards -Cllvm-args style flags to LLVM's command-line machinery. Guarded
// by a static flag because LLVM only allows one round of option parsing.
573 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
574 // Initializing the command-line options more than once is not allowed. So,
575 // check if they've already been initialized. (This could happen if we're
576 // being called from rustpkg, for example). If the arguments change, then
577 // that's just kinda unfortunate.
578 static bool Initialized = false;
582 cl::ParseCommandLineOptions(Argc, Argv);
// Output artifact kind requested by the Rust side: assembly or object code
// (enumerator list elided in this view).
585 enum class LLVMRustFileType {
// LLVM 10 hoisted CodeGenFileType out of TargetMachine, so two converter
// overloads exist, selected by the version check.
591 #if LLVM_VERSION_GE(10, 0)
592 static CodeGenFileType fromRust(LLVMRustFileType Type) {
594 case LLVMRustFileType::AssemblyFile:
595 return CGFT_AssemblyFile;
596 case LLVMRustFileType::ObjectFile:
597 return CGFT_ObjectFile;
599 report_fatal_error("Bad FileType.");
// Pre-LLVM-10 variant using the nested TargetMachine::CodeGenFileType.
603 static TargetMachine::CodeGenFileType fromRust(LLVMRustFileType Type) {
605 case LLVMRustFileType::AssemblyFile:
606 return TargetMachine::CGFT_AssemblyFile;
607 case LLVMRustFileType::ObjectFile:
608 return TargetMachine::CGFT_ObjectFile;
610 report_fatal_error("Bad FileType.");
// Runs the codegen pass pipeline and writes the assembly/object output to
// `Path`. Returns Failure (with the OS error recorded via
// LLVMRustSetLastError) if the file cannot be opened.
615 extern "C" LLVMRustResult
616 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
617 LLVMModuleRef M, const char *Path,
618 LLVMRustFileType RustFileType) {
619 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
620 auto FileType = fromRust(RustFileType);
622 std::string ErrorInfo;
624 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
626 ErrorInfo = EC.message();
627 if (ErrorInfo != "") {
628 LLVMRustSetLastError(ErrorInfo.c_str());
629 return LLVMRustResult::Failure;
// Buffer the output; the emit passes write through BOS into OS.
632 buffer_ostream BOS(OS);
633 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
636 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
637 // stream (OS), so the only real safe place to delete this is here? Don't we
638 // wish this was written in Rust?
639 LLVMDisposePassManager(PMR);
640 return LLVMRustResult::Success;
// Callback signatures used to feed new-PM pass timings into rustc's
// self-profiler (the `void*` is the opaque Rust-side profiler handle).
643 extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
644 const char*, // pass name
645 const char*); // IR name
646 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
648 #if LLVM_VERSION_GE(9, 0)
// Extracts a printable name from the type-erased IR unit handed to pass
// instrumentation callbacks (Module, Function, Loop, or CGSCC node).
650 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
651 if (any_isa<const Module *>(WrappedIr))
652 return any_cast<const Module *>(WrappedIr)->getName().str();
653 if (any_isa<const Function *>(WrappedIr))
654 return any_cast<const Function *>(WrappedIr)->getName().str();
655 if (any_isa<const Loop *>(WrappedIr))
656 return any_cast<const Loop *>(WrappedIr)->getName().str();
657 if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
658 return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
// Wires the Rust self-profiler into the new pass manager's instrumentation:
// before/after hooks are registered for both passes and analyses so each
// shows up as a timed event (keyed by pass name and IR unit name).
663 void LLVMSelfProfileInitializeCallbacks(
664 PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
665 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
666 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
667 PIC.registerBeforePassCallback([LlvmSelfProfiler, BeforePassCallback](
668 StringRef Pass, llvm::Any Ir) {
669 std::string PassName = Pass.str();
670 std::string IrName = LLVMRustwrappedIrGetName(Ir);
671 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
675 PIC.registerAfterPassCallback(
676 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
677 AfterPassCallback(LlvmSelfProfiler);
// Also close the interval when a pass invalidates the IR unit.
680 PIC.registerAfterPassInvalidatedCallback(
681 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass) {
682 AfterPassCallback(LlvmSelfProfiler);
685 PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
686 StringRef Pass, llvm::Any Ir) {
687 std::string PassName = Pass.str();
688 std::string IrName = LLVMRustwrappedIrGetName(Ir);
689 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
692 PIC.registerAfterAnalysisCallback(
693 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
694 AfterPassCallback(LlvmSelfProfiler);
// Which LTO stage this optimization run corresponds to
// (enumerator list elided in this view).
699 enum class LLVMRustOptStage {
// Sanitizer configuration passed from the Rust side; additional flag fields
// (e.g. for MSan/TSan enablement) are on lines elided from this view.
707 struct LLVMRustSanitizerOptions {
710 bool SanitizeAddress;
711 bool SanitizeRecover;
712 int SanitizeMemoryTrackOrigins;
// Runs the new pass manager over `ModuleRef`: sets up tuning options,
// instrumentation (including the rustc self-profiler), optional PGO,
// sanitizer passes, and then builds/runs the pipeline appropriate to the
// requested opt level and LTO stage. Only available on LLVM >= 9; older
// versions abort via report_fatal_error at the bottom.
716 LLVMRustOptimizeWithNewPassManager(
717 LLVMModuleRef ModuleRef,
718 LLVMTargetMachineRef TMRef,
719 LLVMRustPassBuilderOptLevel OptLevelRust,
720 LLVMRustOptStage OptStage,
721 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
722 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
723 bool DisableSimplifyLibCalls,
724 LLVMRustSanitizerOptions *SanitizerOptions,
725 const char *PGOGenPath, const char *PGOUsePath,
726 void* LlvmSelfProfiler,
727 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
728 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
729 #if LLVM_VERSION_GE(9, 0)
730 Module *TheModule = unwrap(ModuleRef);
731 TargetMachine *TM = unwrap(TMRef);
732 PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
734 // FIXME: MergeFunctions is not supported by NewPM yet.
735 (void) MergeFunctions;
// Loop unrolling/interleaving share one flag; vectorization is independent.
737 PipelineTuningOptions PTO;
738 PTO.LoopUnrolling = UnrollLoops;
739 PTO.LoopInterleaving = UnrollLoops;
740 PTO.LoopVectorization = LoopVectorize;
741 PTO.SLPVectorization = SLPVectorize;
// Standard instrumentation plus, if requested, the rustc self-profiler.
743 PassInstrumentationCallbacks PIC;
744 StandardInstrumentations SI;
745 SI.registerCallbacks(PIC);
747 if (LlvmSelfProfiler){
748 LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
// PGO: either instrument (gen) or apply an existing profile (use).
751 Optional<PGOOptions> PGOOpt;
754 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr);
755 } else if (PGOUsePath) {
757 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse);
760 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
762 // FIXME: We may want to expose this as an option.
763 bool DebugPassManager = false;
764 LoopAnalysisManager LAM(DebugPassManager);
765 FunctionAnalysisManager FAM(DebugPassManager);
766 CGSCCAnalysisManager CGAM(DebugPassManager);
767 ModuleAnalysisManager MAM(DebugPassManager);
// Register the default alias-analysis pipeline and a TargetLibraryInfo
// that honors -Zno-builtins style libcall disabling.
769 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
771 Triple TargetTriple(TheModule->getTargetTriple());
772 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
773 if (DisableSimplifyLibCalls)
774 TLII->disableAllFunctions();
775 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
777 PB.registerModuleAnalyses(MAM);
778 PB.registerCGSCCAnalyses(CGAM);
779 PB.registerFunctionAnalyses(FAM);
780 PB.registerLoopAnalyses(LAM);
781 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
783 // We manually collect pipeline callbacks so we can apply them at O0, where the
784 // PassBuilder does not create a pipeline.
785 std::vector<std::function<void(ModulePassManager &)>> PipelineStartEPCallbacks;
786 std::vector<std::function<void(FunctionPassManager &, PassBuilder::OptimizationLevel)>>
787 OptimizerLastEPCallbacks;
// Optional IR verification at the start of the pipeline.
790 PipelineStartEPCallbacks.push_back([VerifyIR](ModulePassManager &MPM) {
791 MPM.addPass(VerifierPass());
// Sanitizers: MSan and TSan moved to pipeline-start module passes in
// LLVM 10; earlier they ran as optimizer-last function passes. ASan always
// needs both the globals-metadata analysis up front and the function pass.
795 if (SanitizerOptions) {
796 if (SanitizerOptions->SanitizeMemory) {
797 MemorySanitizerOptions Options(
798 SanitizerOptions->SanitizeMemoryTrackOrigins,
799 SanitizerOptions->SanitizeRecover,
800 /*CompileKernel=*/false);
801 #if LLVM_VERSION_GE(10, 0)
802 PipelineStartEPCallbacks.push_back([Options](ModulePassManager &MPM) {
803 MPM.addPass(MemorySanitizerPass(Options));
806 OptimizerLastEPCallbacks.push_back(
807 [Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
808 FPM.addPass(MemorySanitizerPass(Options));
813 if (SanitizerOptions->SanitizeThread) {
814 #if LLVM_VERSION_GE(10, 0)
815 PipelineStartEPCallbacks.push_back([](ModulePassManager &MPM) {
816 MPM.addPass(ThreadSanitizerPass());
819 OptimizerLastEPCallbacks.push_back(
820 [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
821 FPM.addPass(ThreadSanitizerPass());
826 if (SanitizerOptions->SanitizeAddress) {
827 PipelineStartEPCallbacks.push_back([&](ModulePassManager &MPM) {
828 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
830 OptimizerLastEPCallbacks.push_back(
831 [SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
832 FPM.addPass(AddressSanitizerPass(
833 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover,
834 /*UseAfterScope=*/true));
837 PipelineStartEPCallbacks.push_back(
838 [SanitizerOptions](ModulePassManager &MPM) {
839 MPM.addPass(ModuleAddressSanitizerPass(
840 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover));
// Build the pipeline. At O0 the PassBuilder builds nothing, so the collected
// callbacks (and always-inliner / O0 PGO passes) are applied by hand;
// otherwise the stage-appropriate default pipeline is used.
846 ModulePassManager MPM(DebugPassManager);
847 if (!NoPrepopulatePasses) {
848 if (OptLevel == PassBuilder::O0) {
849 for (const auto &C : PipelineStartEPCallbacks)
852 if (!OptimizerLastEPCallbacks.empty()) {
853 FunctionPassManager FPM(DebugPassManager);
854 for (const auto &C : OptimizerLastEPCallbacks)
856 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
859 MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
861 #if LLVM_VERSION_GE(10, 0)
863 PB.addPGOInstrPassesForO0(
864 MPM, DebugPassManager, PGOOpt->Action == PGOOptions::IRInstr,
865 /*IsCS=*/false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
869 for (const auto &C : PipelineStartEPCallbacks)
870 PB.registerPipelineStartEPCallback(C);
871 if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
872 for (const auto &C : OptimizerLastEPCallbacks)
873 PB.registerOptimizerLastEPCallback(C);
877 case LLVMRustOptStage::PreLinkNoLTO:
878 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
880 case LLVMRustOptStage::PreLinkThinLTO:
881 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
882 if (!OptimizerLastEPCallbacks.empty()) {
883 FunctionPassManager FPM(DebugPassManager);
884 for (const auto &C : OptimizerLastEPCallbacks)
886 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
889 case LLVMRustOptStage::PreLinkFatLTO:
890 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
892 case LLVMRustOptStage::ThinLTO:
893 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
894 // It only seems to be needed for C++ specific optimizations.
895 MPM = PB.buildThinLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
897 case LLVMRustOptStage::FatLTO:
898 MPM = PB.buildLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
// ThinLTO buffers require canonical aliases and named anonymous globals.
904 if (UseThinLTOBuffers) {
905 MPM.addPass(CanonicalizeAliasesPass());
906 MPM.addPass(NameAnonGlobalPass());
909 // Upgrade all calls to old intrinsics first.
910 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
911 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
913 MPM.run(*TheModule, MAM);
915 // The new pass manager has been available for a long time,
916 // but we don't bother supporting it on old LLVM versions.
917 report_fatal_error("New pass manager only supported since LLVM 9");
921 // Callback to demangle function name
923 // * name to be demangled
926 // * output buffer len
927 // Returns len of demangled string, or 0 if demangle failed.
// (The arguments are: mangled-name pointer, mangled-name length,
// output buffer pointer, output buffer length; some of the per-argument
// comment lines are elided from this view.)
928 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
// Annotates printed LLVM IR with demangled Rust symbol names: a "; <name>"
// comment above each function definition and above call/invoke instructions.
// `Demangle` is the Rust-provided callback; `Buf` is a reused scratch buffer.
933 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
935 std::vector<char> Buf;
938 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
940 // Return empty string if demangle failed
941 // or if name does not need to be demangled
942 StringRef CallDemangle(StringRef name) {
947 if (Buf.size() < name.size() * 2) {
948 // Demangled name usually shorter than mangled,
949 // but allocate twice as much memory just in case
950 Buf.resize(name.size() * 2);
953 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
959 auto Demangled = StringRef(Buf.data(), R);
960 if (Demangled == name) {
961 // Do not print anything if demangled name is equal to mangled.
// Emits "; <demangled>" above a function definition, or nothing if the
// name could not be (or need not be) demangled.
968 void emitFunctionAnnot(const Function *F,
969 formatted_raw_ostream &OS) override {
970 StringRef Demangled = CallDemangle(F->getName());
971 if (Demangled.empty()) {
975 OS << "; " << Demangled << "\n";
// Emits "; call <demangled>" / "; invoke <demangled>" above direct
// call/invoke instructions whose callee has a demanglable name.
978 void emitInstructionAnnot(const Instruction *I,
979 formatted_raw_ostream &OS) override {
982 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
984 Value = CI->getCalledValue();
985 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
987 Value = II->getCalledValue();
989 // Could demangle more operations, e. g.
990 // `store %place, @function`.
994 if (!Value->hasName()) {
998 StringRef Demangled = CallDemangle(Value->getName());
999 if (Demangled.empty()) {
1003 OS << "; " << Name << " " << Demangled << "\n";
// Pretty-prints the module's IR to `Path`, annotated with demangled Rust
// symbol names via RustAssemblyAnnotationWriter. Returns Failure (recording
// the OS error) if the output file cannot be opened.
1009 extern "C" LLVMRustResult
1010 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
1011 std::string ErrorInfo;
1013 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
1015 ErrorInfo = EC.message();
1016 if (ErrorInfo != "") {
1017 LLVMRustSetLastError(ErrorInfo.c_str());
1018 return LLVMRustResult::Failure;
1021 RustAssemblyAnnotationWriter AAW(Demangle);
1022 formatted_raw_ostream FOS(OS);
1023 unwrap(M)->print(FOS, &AAW);
1025 return LLVMRustResult::Success;
// Implements `rustc -C passes=list`: initializes the registry, then walks
// every registered pass and prints "<arg> - <name>" for those with a
// command-line argument.
1028 extern "C" void LLVMRustPrintPasses() {
1029 LLVMInitializePasses();
1030 struct MyListener : PassRegistrationListener {
1031 void passEnumerate(const PassInfo *Info) {
1032 StringRef PassArg = Info->getPassArgument();
1033 StringRef PassName = Info->getPassName();
1034 if (!PassArg.empty()) {
1035 // These unsigned->signed casts could theoretically overflow, but
1036 // realistically never will (and even if, the result is implementation
1037 // defined rather than plain UB).
1038 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
1039 (int)PassName.size(), PassName.data());
1044 PassRegistry *PR = PassRegistry::getPassRegistry();
1045 PR->enumerateWith(&Listener);
// Configures the builder's inliner to be the always-inliner (used at -O0);
// `AddLifetimes` controls whether lifetime intrinsics are inserted.
1048 extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
1049 bool AddLifetimes) {
1050 unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
// Internalizes everything in the module except the `Len` symbol names listed
// in `Symbols` (linear scan per global), then runs that single pass.
1053 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
1055 llvm::legacy::PassManager passes;
1057 auto PreserveFunctions = [=](const GlobalValue &GV) {
1058 for (size_t I = 0; I < Len; I++) {
1059 if (GV.getName() == Symbols[I]) {
1066 passes.add(llvm::createInternalizePass(PreserveFunctions));
1068 passes.run(*unwrap(M));
// Marks every function in the module nounwind, and additionally marks every
// invoke instruction inside function bodies as non-throwing (used when
// unwinding is disabled, e.g. panic=abort).
1071 extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
1072 for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
1074 GV->setDoesNotThrow();
1075 Function *F = dyn_cast<Function>(GV);
1079 for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
1080 for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
1081 if (isa<InvokeInst>(I)) {
1082 InvokeInst *CI = cast<InvokeInst>(I);
1083 CI->setDoesNotThrow();
// Copies the TargetMachine's computed data layout onto the module so IR-level
// queries (alignment, pointer size, ...) agree with codegen.
1091 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1092 LLVMTargetMachineRef TMR) {
1093 TargetMachine *Target = unwrap(TMR);
1094 unwrap(Module)->setDataLayout(Target->createDataLayout());
// Module-level PIC/PIE flags; rustc always uses the "big" / "large" levels.
1097 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1098 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
1101 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1102 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1105 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1106 // right now. This ThinLTO support is only enabled on "recent ish" versions of
1107 // LLVM, and otherwise it's just blanket rejected from other compilers.
1109 // Most of this implementation is straight copied from LLVM. At the time of
1110 // this writing it wasn't *quite* suitable to reuse more code from upstream
1111 // for our purposes, but we should strive to upstream this support once it's
1112 // ready to go! I figure we may want a bit of testing locally first before
1113 // sending this upstream to LLVM. I hear though they're quite eager to receive
1114 // feedback like this!
1116 // If you're reading this code and wondering "what in the world" or you're
1117 // working "good lord my LLVM upgrade is *still* failing due to these bindings"
1118 // then fear not! (ok maybe fear a little). All code here is mostly based
1119 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1121 // You'll find that the general layout here roughly corresponds to the `run`
1122 // method in that file as well as `ProcessThinLTOModule`. Functions are
1123 // specifically commented below as well, but if you're updating this code
1124 // or otherwise trying to understand it, the LLVM source will be useful in
1125 // interpreting the mysteries within.
1127 // Otherwise I'll apologize in advance, it probably requires a relatively
1128 // significant investment on your part to "truly understand" what's going on
1129 // here. Not saying I do myself, but it took me awhile staring at LLVM's source
1130 // and various online resources about ThinLTO to make heads or tails of all
1133 // This is a shared data structure which *must* be threadsafe to share
1134 // read-only amongst threads. This also corresponds basically to the arguments
1135 // of the `ProcessThinLTOModule` function in the LLVM source.
1136 struct LLVMRustThinLTOData {
1137 // The combined index that is the global analysis over all modules we're
1138 // performing ThinLTO for. This is mostly managed by LLVM.
1139 ModuleSummaryIndex Index;
1141 // All modules we may look at, stored as in-memory serialized versions. This
1142 // is later used when inlining to ensure we can extract any module to inline
1144 StringMap<MemoryBufferRef> ModuleMap;
1146 // A set that we manage of everything we *don't* want internalized. Note that
1147 // this includes all transitive references right now as well, but it may not
1149 DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
1151 // Not 100% sure what these are, but they impact what's internalized and
1152 // what's inlined across modules, I believe.
1153 StringMap<FunctionImporter::ImportMapTy> ImportLists;
1154 StringMap<FunctionImporter::ExportSetTy> ExportLists;
1155 StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
1157 LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
// Just an argument to the `LLVMRustCreateThinLTOData` function below.
//
// One serialized module handed over from the Rust side: a name plus a
// pointer/length pair of raw bitcode bytes (borrowed from the caller —
// see the `module->data` / `module->len` uses in
// `LLVMRustCreateThinLTOData`).
struct LLVMRustThinLTOModule {
  const char *identifier;
  const char *data;
  size_t len;
};
1167 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
1169 static const GlobalValueSummary *
1170 getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
1171 auto StrongDefForLinker = llvm::find_if(
1172 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1173 auto Linkage = Summary->linkage();
1174 return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
1175 !GlobalValue::isWeakForLinker(Linkage);
1177 if (StrongDefForLinker != GVSummaryList.end())
1178 return StrongDefForLinker->get();
1180 auto FirstDefForLinker = llvm::find_if(
1181 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1182 auto Linkage = Summary->linkage();
1183 return !GlobalValue::isAvailableExternallyLinkage(Linkage);
1185 if (FirstDefForLinker == GVSummaryList.end())
1187 return FirstDefForLinker->get();
1190 // The main entry point for creating the global ThinLTO analysis. The structure
1191 // here is basically the same as before threads are spawned in the `run`
1192 // function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
// NOTE(review): this extract looks truncated — the embedded numbering skips
// lines (parameter declarations, closing braces, `#else`/`#endif` pairs and
// error-path returns appear to be missing). Comments added below describe only
// the visible code; confirm details against the complete source file.
1193 extern "C" LLVMRustThinLTOData*
1194 LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
// NOTE(review): the loops below read `num_modules` and `num_symbols`, so those
// parameters are presumably declared on elided lines here — confirm.
1196 const char **preserved_symbols,
1198 #if LLVM_VERSION_GE(10, 0)
1199 auto Ret = std::make_unique<LLVMRustThinLTOData>();
// Pre-LLVM-10 spelling of make_unique (presumably under an elided `#else`).
1201 auto Ret = llvm::make_unique<LLVMRustThinLTOData>();
1204 // Load each module's summary and merge it into one combined index
1205 for (int i = 0; i < num_modules; i++) {
1206 auto module = &modules[i];
1207 StringRef buffer(module->data, module->len);
1208 MemoryBufferRef mem_buffer(buffer, module->identifier);
1210 Ret->ModuleMap[module->identifier] = mem_buffer;
// On a malformed summary, record the message for the Rust side to surface
// (the early `return nullptr;` appears to be on an elided line).
1212 if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
1213 LLVMRustSetLastError(toString(std::move(Err)).c_str());
1218 // Collect for each module the list of function it defines (GUID -> Summary)
1219 Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);
1221 // Convert the preserved symbols set from string to GUID, this is then needed
1222 // for internalization.
1223 for (int i = 0; i < num_symbols; i++) {
1224 auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
1225 Ret->GUIDPreservedSymbols.insert(GUID);
1228 // Collect the import/export lists for all modules from the call-graph in the
1231 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
// Report every symbol's prevailing-ness as Unknown during dead-symbol
// analysis; prevailing copies are resolved explicitly further below.
1232 auto deadIsPrevailing = [&](GlobalValue::GUID G) {
1233 return PrevailingType::Unknown;
1235 #if LLVM_VERSION_GE(8, 0)
1236 // We don't have a complete picture in our use of ThinLTO, just our immediate
1237 // crate, so we need `ImportEnabled = false` to limit internalization.
1238 // Otherwise, we sometimes lose `static` values -- see #60184.
1239 computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
1240 deadIsPrevailing, /* ImportEnabled = */ false);
1242 computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols, deadIsPrevailing);
1244 ComputeCrossModuleImport(
1246 Ret->ModuleToDefinedGVSummaries,
1251 // Resolve LinkOnce/Weak symbols, this has to be computed early because it
1252 // impacts the caching.
1254 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
1255 // being lifted from `lib/LTO/LTO.cpp` as well
1256 StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
1257 DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
// Only symbols with multiple summaries need a prevailing copy chosen; a
// single summary is trivially prevailing.
1258 for (auto &I : Ret->Index) {
1259 if (I.second.SummaryList.size() > 1)
1260 PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
1262 auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
1263 const auto &Prevailing = PrevailingCopy.find(GUID);
1264 if (Prevailing == PrevailingCopy.end())
1266 return Prevailing->second == S;
1268 auto recordNewLinkage = [&](StringRef ModuleIdentifier,
1269 GlobalValue::GUID GUID,
1270 GlobalValue::LinkageTypes NewLinkage) {
1271 ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
// LLVM 8 renamed `thinLTOResolveWeakForLinkerInIndex` to
// `thinLTOResolvePrevailingInIndex`; LLVM 9 added the preserved-symbols
// parameter — hence the three variants below.
1273 #if LLVM_VERSION_GE(9, 0)
1274 thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
1275 Ret->GUIDPreservedSymbols);
1276 #elif LLVM_VERSION_GE(8, 0)
1277 thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage);
1279 thinLTOResolveWeakForLinkerInIndex(Ret->Index, isPrevailing, recordNewLinkage);
1282 // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
1283 // callback below. This callback below will dictate the linkage for all
1284 // summaries in the index, and we basically just only want to ensure that dead
1285 // symbols are internalized. Otherwise everything that's already external
1286 // linkage will stay as external, and internal will stay as internal.
1287 std::set<GlobalValue::GUID> ExportedGUIDs;
1288 for (auto &List : Ret->Index) {
1289 for (auto &GVS: List.second.SummaryList) {
// Local-linkage summaries are skipped (the `continue` appears to be on an
// elided line).
1290 if (GlobalValue::isLocalLinkage(GVS->linkage()))
1292 auto GUID = GVS->getOriginalName();
1293 if (GVS->flags().Live)
1294 ExportedGUIDs.insert(GUID);
// LLVM 10 changed the `isExported` callback to take a `ValueInfo` instead of
// a raw GUID, hence the two variants below.
1297 #if LLVM_VERSION_GE(10, 0)
1298 auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
1299 const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
1300 return (ExportList != Ret->ExportLists.end() &&
1301 ExportList->second.count(VI)) ||
1302 ExportedGUIDs.count(VI.getGUID());
1304 thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
1306 auto isExported = [&](StringRef ModuleIdentifier, GlobalValue::GUID GUID) {
1307 const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
1308 return (ExportList != Ret->ExportLists.end() &&
1309 ExportList->second.count(GUID)) ||
1310 ExportedGUIDs.count(GUID);
1312 thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported);
// Ownership transfers to the caller, who must release the data via
// `LLVMRustFreeThinLTOData`.
1315 return Ret.release();
1319 LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
1323 // Below are the various passes that happen *per module* when doing ThinLTO.
1325 // In other words, these are the functions that are all run concurrently
1326 // with one another, one per module. The passes here correspond to the analysis
1327 // passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
1328 // `ProcessThinLTOModule` function. Here they're split up into separate steps
1329 // so rustc can save off the intermediate bytecode between each step.
1332 LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1333 Module &Mod = *unwrap(M);
1334 if (renameModuleForThinLTO(Mod, Data->Index)) {
1335 LLVMRustSetLastError("renameModuleForThinLTO failed");
1342 LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1343 Module &Mod = *unwrap(M);
1344 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1345 #if LLVM_VERSION_GE(8, 0)
1346 thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
1348 thinLTOResolveWeakForLinkerModule(Mod, DefinedGlobals);
1354 LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1355 Module &Mod = *unwrap(M);
1356 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1357 thinLTOInternalizeModule(Mod, DefinedGlobals);
1362 LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1363 Module &Mod = *unwrap(M);
// Look up what this module should import, then run LLVM's FunctionImporter
// with a custom lazy-loading callback over our in-memory module map.
1365 const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
1366 auto Loader = [&](StringRef Identifier) {
1367 const auto &Memory = Data->ModuleMap.lookup(Identifier);
1368 auto &Context = Mod.getContext();
// Lazily parse the serialized module; metadata is materialized explicitly
// below. NOTE(review): the error check on `MOrErr` appears to be on elided
// lines of this extract — confirm against the full source.
1369 auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);
1374 // The rest of this closure is a workaround for
1375 // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
1376 // we accidentally import wasm custom sections into different modules,
1377 // duplicating them in the final output artifact.
1379 // The issue is worked around here by manually removing the
1380 // `wasm.custom_sections` named metadata node from any imported module. This
1381 // we know isn't used by any optimization pass so there's no need for it to
1384 // Note that the metadata is currently lazily loaded, so we materialize it
1385 // here before looking up if there's metadata inside. The `FunctionImporter`
1386 // will immediately materialize metadata anyway after an import, so this
1387 // shouldn't be a perf hit.
1388 if (Error Err = (*MOrErr)->materializeMetadata()) {
// Convert the materialization failure back into the `Expected` shape that
// the loader callback must return.
1389 Expected<std::unique_ptr<Module>> Ret(std::move(Err));
1393 auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
1394 if (WasmCustomSections)
1395 WasmCustomSections->eraseFromParent();
1399 FunctionImporter Importer(Data->Index, Loader);
1400 Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
// On failure, record the message so the Rust side can surface it.
1402 LLVMRustSetLastError(toString(Result.takeError()).c_str());
// Signature of the callback invoked once per (importer, importee) pair by
// `LLVMRustGetThinLTOModuleImports`. Both names are NUL-terminated C strings;
// assume they are only valid for the duration of the call — TODO confirm.
1408 extern "C" typedef void (*LLVMRustModuleNameCallback)(void*, // payload
1409 const char*, // importing module name
1410 const char*); // imported module name
1412 // Calls `module_name_callback` for each module import done by ThinLTO.
1413 // The callback is provided with regular null-terminated C strings.
1415 LLVMRustGetThinLTOModuleImports(const LLVMRustThinLTOData *data,
1416 LLVMRustModuleNameCallback module_name_callback,
1417 void* callback_payload) {
1418 for (const auto& importing_module : data->ImportLists) {
1419 const std::string importing_module_id = importing_module.getKey().str();
1420 const auto& imports = importing_module.getValue();
1421 for (const auto& imported_module : imports) {
1422 const std::string imported_module_id = imported_module.getKey().str();
1423 module_name_callback(callback_payload,
1424 importing_module_id.c_str(),
1425 imported_module_id.c_str());
1430 // This struct and various functions are sort of a hack right now, but the
1431 // problem is that we've got in-memory LLVM modules after we generate and
1432 // optimize all codegen-units for one compilation in rustc. To be compatible
1433 // with the LTO support above we need to serialize the modules plus their
1434 // ThinLTO summary into memory.
1436 // This structure is basically an owned version of a serialize module, with
1437 // a ThinLTO summary attached.
struct LLVMRustThinLTOBuffer {
  // Owned serialized bitcode (with ThinLTO summary) bytes; exposed to the
  // Rust side via `LLVMRustThinLTOBufferPtr` / `LLVMRustThinLTOBufferLen`.
  std::string data;
};
1442 extern "C" LLVMRustThinLTOBuffer*
1443 LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
1444 #if LLVM_VERSION_GE(10, 0)
1445 auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
1447 auto Ret = llvm::make_unique<LLVMRustThinLTOBuffer>();
1450 raw_string_ostream OS(Ret->data);
1452 legacy::PassManager PM;
1453 PM.add(createWriteThinLTOBitcodePass(OS));
1457 return Ret.release();
1461 LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
1465 extern "C" const void*
1466 LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
1467 return Buffer->data.data();
1471 LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
1472 return Buffer->data.length();
1475 // This is what we used to parse upstream bitcode for actual ThinLTO
1476 // processing. We'll call this once per module optimized through ThinLTO, and
1477 // it'll be called concurrently on many threads.
1478 extern "C" LLVMModuleRef
1479 LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
1482 const char *identifier) {
1483 StringRef Data(data, len);
1484 MemoryBufferRef Buffer(Data, identifier);
1485 unwrap(Context)->enableDebugTypeODRUniquing();
1486 Expected<std::unique_ptr<Module>> SrcOrError =
1487 parseBitcodeFile(Buffer, *unwrap(Context));
1489 LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
1492 return wrap(std::move(*SrcOrError).release());
1495 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1496 // the comment in `back/lto.rs` for why this exists.
1498 LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
1500 DICompileUnit **B) {
1501 Module *M = unwrap(Mod);
1502 DICompileUnit **Cur = A;
1503 DICompileUnit **Next = B;
1504 for (DICompileUnit *CU : M->debug_compile_units()) {
1513 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1514 // the comment in `back/lto.rs` for why this exists.
// NOTE(review): this extract skips lines (loop bodies, braces and the final
// close of the function appear missing) — the annotations below describe only
// the visible code; confirm against the complete source file.
1516 LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
1517 Module *M = unwrap(Mod);
1519 // If the original source module didn't have a `DICompileUnit` then try to
1520 // merge all the existing compile units. If there aren't actually any though
1521 // then there's not much for us to do so return.
1522 if (Unit == nullptr) {
1523 for (DICompileUnit *CU : M->debug_compile_units()) {
// Still no compile unit found anywhere: nothing to patch (the early return
// appears to be on an elided line).
1527 if (Unit == nullptr)
1531 // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
1532 // process it recursively. Note that we specifically iterate over instructions
1533 // to ensure we feed everything into it.
1534 DebugInfoFinder Finder;
1535 Finder.processModule(*M);
// Walk every instruction so debug locations and llvm.dbg.value /
// llvm.dbg.declare intrinsics are fed to the finder as well.
1536 for (Function &F : M->functions()) {
1537 for (auto &FI : F) {
1538 for (Instruction &BI : FI) {
1539 if (auto Loc = BI.getDebugLoc())
1540 Finder.processLocation(*M, Loc);
1541 if (auto DVI = dyn_cast<DbgValueInst>(&BI))
1542 Finder.processValue(*M, DVI);
1543 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1544 Finder.processDeclare(*M, DDI);
1549 // After we've found all our debuginfo, rewrite all subprograms to point to
1550 // the same `DICompileUnit`.
1551 for (auto &F : Finder.subprograms()) {
1552 F->replaceUnit(Unit);
1555 // Erase any other references to other `DICompileUnit` instances, the verifier
1556 // will later ensure that we don't actually have any other stale references to
// worry about: `llvm.dbg.cu` is left holding exactly the one unit.
1558 auto *MD = M->getNamedMetadata("llvm.dbg.cu");
1559 MD->clearOperands();
1560 MD->addOperand(Unit);