8 #include "llvm/Analysis/TargetLibraryInfo.h"
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/CodeGen/TargetSubtargetInfo.h"
11 #include "llvm/InitializePasses.h"
12 #include "llvm/IR/AutoUpgrade.h"
13 #include "llvm/IR/AssemblyAnnotationWriter.h"
14 #include "llvm/IR/IntrinsicInst.h"
15 #include "llvm/IR/Verifier.h"
16 #include "llvm/Passes/PassBuilder.h"
17 #if LLVM_VERSION_GE(9, 0)
18 #include "llvm/Passes/StandardInstrumentations.h"
20 #include "llvm/Support/CBindingWrapping.h"
21 #include "llvm/Support/FileSystem.h"
22 #include "llvm/Support/Host.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
25 #include "llvm/Transforms/IPO/AlwaysInliner.h"
26 #include "llvm/Transforms/IPO/FunctionImport.h"
27 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
28 #include "llvm/LTO/LTO.h"
29 #include "llvm-c/Transforms/PassManagerBuilder.h"
31 #include "llvm/Transforms/Instrumentation.h"
32 #if LLVM_VERSION_GE(9, 0)
33 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
34 #include "llvm/Support/TimeProfiler.h"
36 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
37 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
38 #if LLVM_VERSION_GE(9, 0)
39 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
41 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
// Opaque C-API handle types for LLVM objects passed across the FFI boundary,
// plus wrap()/unwrap() conversion helpers generated by the LLVM macro.
45 typedef struct LLVMOpaquePass *LLVMPassRef;
46 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
48 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
49 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
50 DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBuilder,
51 LLVMPassManagerBuilderRef)
// Registers all LLVM pass groups (core, codegen, opts, ...) with the global
// PassRegistry. Called once from the Rust side before any pass lookup.
// NOTE(review): the closing brace is not visible in this excerpt.
53 extern "C" void LLVMInitializePasses() {
54 PassRegistry &Registry = *PassRegistry::getPassRegistry();
55 initializeCore(Registry);
56 initializeCodeGen(Registry);
57 initializeScalarOpts(Registry);
58 initializeVectorization(Registry);
59 initializeIPO(Registry);
60 initializeAnalysis(Registry);
61 initializeTransformUtils(Registry);
62 initializeInstCombine(Registry);
63 initializeInstrumentation(Registry);
64 initializeTarget(Registry);
// Starts LLVM's -ftime-trace style profiler. On LLVM >= 10 the API takes a
// granularity and a process name; on LLVM 9 it takes no arguments; older
// versions have no time-trace support at all.
67 extern "C" void LLVMTimeTraceProfilerInitialize() {
68 #if LLVM_VERSION_GE(10, 0)
69 timeTraceProfilerInitialize(
70 /* TimeTraceGranularity */ 0,
71 /* ProcName */ "rustc");
72 #elif LLVM_VERSION_GE(9, 0)
73 timeTraceProfilerInitialize();

// Flushes the collected time-trace events to `FileName` and tears the
// profiler down. No-op on LLVM < 9 (the whole body is version-guarded).
77 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
78 #if LLVM_VERSION_GE(9, 0)
79 StringRef FN(FileName);
81 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
83 timeTraceProfilerWrite(OS);
84 timeTraceProfilerCleanup();
// Mirror of LLVM's PassKind, restricted to the granularities the Rust side
// distinguishes. Enumerator list is not visible in this excerpt.
88 enum class LLVMRustPassKind {

// Maps LLVM's PassKind onto the Rust-facing enum above.
// NOTE(review): the `switch`/`case` lines appear to be missing from this
// excerpt; only the return statements survived.
94 static LLVMRustPassKind toRust(PassKind Kind) {
97 return LLVMRustPassKind::Function;
99 return LLVMRustPassKind::Module;
101 return LLVMRustPassKind::Other;
// Looks up a legacy pass by its registered argument name (e.g. "gvn") and
// instantiates it. Presumably returns null when the name is unknown —
// the null-check branch is not visible in this excerpt.
105 extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
106 StringRef SR(PassName);
107 PassRegistry *PR = PassRegistry::getPassRegistry();
109 const PassInfo *PI = PR->getPassInfo(SR);
111 return wrap(PI->createPass());
// Creates the function-level ASan pass. Rust never targets the kernel
// variant, and use-after-scope checking is always enabled.
116 extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
117 const bool CompileKernel = false;
118 const bool UseAfterScope = true;
120 return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));

// Creates the module-level ASan pass; the factory function was renamed in
// LLVM 9, hence the version split.
123 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
124 const bool CompileKernel = false;
126 #if LLVM_VERSION_GE(9, 0)
127 return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
129 return wrap(createAddressSanitizerModulePass(CompileKernel, Recover));

// Creates the MSan legacy pass. LLVM 9 introduced the MemorySanitizerOptions
// aggregate; older versions take the fields as separate arguments.
133 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
134 #if LLVM_VERSION_GE(9, 0)
135 const bool CompileKernel = false;
137 return wrap(createMemorySanitizerLegacyPassPass(
138 MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
140 return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover));

// Creates the TSan legacy pass (no configuration knobs).
144 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
145 return wrap(createThreadSanitizerLegacyPassPass());
// Reports whether a pass is function-, module-, or otherwise-scoped so the
// Rust side can add it to the right pass manager.
148 extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
150 Pass *Pass = unwrap(RustPass);
151 return toRust(Pass->getPassKind());

// Adds an already-created pass to a legacy pass manager. The PMB takes
// ownership of the pass; the `PMB->add(...)` call is not visible in this
// excerpt but both operands are unwrapped here.
154 extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
156 Pass *Pass = unwrap(RustPass);
157 PassManagerBase *PMB = unwrap(PMR);

// Thin wrapper because the LLVM C API does not expose
// populateThinLTOPassManager.
162 void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
163 LLVMPassManagerBuilderRef PMBR,
164 LLVMPassManagerRef PMR
166 unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
// Registers a set of user-supplied passes to run at the end of the pipeline.
// The lambda captures the raw Passes pointer, so the caller must keep that
// array alive until the pass manager is populated.
170 void LLVMRustAddLastExtensionPasses(
171 LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
172 auto AddExtensionPasses = [Passes, NumPasses](
173 const PassManagerBuilder &Builder, PassManagerBase &PM) {
174 for (size_t I = 0; I < NumPasses; I++) {
175 PM.add(unwrap(Passes[I]));
178 // Add the passes to both of the pre-finalization extension points,
179 // so they are run for optimized and non-optimized builds.
180 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
182 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
// For each backend that may or may not be compiled into this LLVM build,
// define SUBTARGET_<ARCH> to expand the SUBTARGET(x) macro when the
// component is present and to nothing otherwise. GEN_SUBTARGETS then stamps
// out extern declarations of each enabled backend's feature/CPU tables.
186 #ifdef LLVM_COMPONENT_X86
187 #define SUBTARGET_X86 SUBTARGET(X86)
189 #define SUBTARGET_X86
192 #ifdef LLVM_COMPONENT_ARM
193 #define SUBTARGET_ARM SUBTARGET(ARM)
195 #define SUBTARGET_ARM
198 #ifdef LLVM_COMPONENT_AARCH64
199 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
201 #define SUBTARGET_AARCH64
204 #ifdef LLVM_COMPONENT_MIPS
205 #define SUBTARGET_MIPS SUBTARGET(Mips)
207 #define SUBTARGET_MIPS
210 #ifdef LLVM_COMPONENT_POWERPC
211 #define SUBTARGET_PPC SUBTARGET(PPC)
213 #define SUBTARGET_PPC
216 #ifdef LLVM_COMPONENT_SYSTEMZ
217 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
219 #define SUBTARGET_SYSTEMZ
222 #ifdef LLVM_COMPONENT_MSP430
223 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
225 #define SUBTARGET_MSP430
228 #ifdef LLVM_COMPONENT_RISCV
229 #define SUBTARGET_RISCV SUBTARGET(RISCV)
231 #define SUBTARGET_RISCV
234 #ifdef LLVM_COMPONENT_SPARC
235 #define SUBTARGET_SPARC SUBTARGET(Sparc)
237 #define SUBTARGET_SPARC
240 #ifdef LLVM_COMPONENT_HEXAGON
241 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
243 #define SUBTARGET_HEXAGON
246 #define GEN_SUBTARGETS \
258 #define SUBTARGET(x) \
260 extern const SubtargetFeatureKV x##FeatureKV[]; \
261 extern const SubtargetFeatureKV x##SubTypeKV[]; \
// Returns true when the target machine's subtarget supports `Feature`.
// The "+" prefix is the MC feature-string syntax for "feature enabled".
267 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
268 const char *Feature) {
269 TargetMachine *Target = unwrap(TM);
270 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
271 return MCInfo->checkFeatures(std::string("+") + Feature);
// Rust-facing mirrors of LLVM configuration enums, each paired with a
// fromRust() conversion that aborts on an out-of-range value.
// NOTE(review): the enumerator lists and `switch (...)` headers appear to be
// missing from this excerpt; only the case/return pairs survived.
274 enum class LLVMRustCodeModel {

283 static CodeModel::Model fromRust(LLVMRustCodeModel Model) {
285 case LLVMRustCodeModel::Small:
286 return CodeModel::Small;
287 case LLVMRustCodeModel::Kernel:
288 return CodeModel::Kernel;
289 case LLVMRustCodeModel::Medium:
290 return CodeModel::Medium;
291 case LLVMRustCodeModel::Large:
292 return CodeModel::Large;
294 report_fatal_error("Bad CodeModel.");

298 enum class LLVMRustCodeGenOptLevel {

306 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
308 case LLVMRustCodeGenOptLevel::None:
309 return CodeGenOpt::None;
310 case LLVMRustCodeGenOptLevel::Less:
311 return CodeGenOpt::Less;
312 case LLVMRustCodeGenOptLevel::Default:
313 return CodeGenOpt::Default;
314 case LLVMRustCodeGenOptLevel::Aggressive:
315 return CodeGenOpt::Aggressive;
317 report_fatal_error("Bad CodeGenOptLevel.");

321 enum class LLVMRustPassBuilderOptLevel {

// Used by the new-pass-manager entry point below.
330 static PassBuilder::OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
332 case LLVMRustPassBuilderOptLevel::O0:
333 return PassBuilder::O0;
334 case LLVMRustPassBuilderOptLevel::O1:
335 return PassBuilder::O1;
336 case LLVMRustPassBuilderOptLevel::O2:
337 return PassBuilder::O2;
338 case LLVMRustPassBuilderOptLevel::O3:
339 return PassBuilder::O3;
340 case LLVMRustPassBuilderOptLevel::Os:
341 return PassBuilder::Os;
342 case LLVMRustPassBuilderOptLevel::Oz:
343 return PassBuilder::Oz;
345 report_fatal_error("Bad PassBuilderOptLevel.");

349 enum class LLVMRustRelocModel {

358 static Reloc::Model fromRust(LLVMRustRelocModel RustReloc) {
360 case LLVMRustRelocModel::Static:
361 return Reloc::Static;
362 case LLVMRustRelocModel::PIC:
364 case LLVMRustRelocModel::DynamicNoPic:
365 return Reloc::DynamicNoPIC;
366 case LLVMRustRelocModel::ROPI:
368 case LLVMRustRelocModel::RWPI:
370 case LLVMRustRelocModel::ROPIRWPI:
371 return Reloc::ROPI_RWPI;
373 report_fatal_error("Bad RelocModel.");
377 /// getLongestEntryLength - Return the length of the longest entry in the table.
/// Used below to column-align the CPU/feature listings.
/// NOTE(review): the accumulator declaration and return statement appear to
/// be missing from this excerpt.
378 template<typename KV>
379 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
381 for (auto &I : Table)
382 MaxLen = std::max(MaxLen, std::strlen(I.Key));
// Implements `rustc --print target-cpus`: lists every CPU known to the
// target's subtarget table. When host and target arch match, also offers
// "native" with the detected host CPU.
386 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
387 const TargetMachine *Target = unwrap(TM);
388 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
389 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
390 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
391 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
392 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
394 printf("Available CPUs for this target:\n");
395 if (HostArch == TargetArch) {
396 const StringRef HostCPU = sys::getHostCPUName();
397 printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
398 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
400 for (auto &CPU : CPUTable)
401 printf(" %-*s\n", MaxCPULen, CPU.Key);

// Implements `rustc --print target-features`: lists each feature with its
// description plus a short usage hint.
405 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef TM) {
406 const TargetMachine *Target = unwrap(TM);
407 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
408 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
409 unsigned MaxFeatLen = getLongestEntryLength(FeatTable);
411 printf("Available features for this target:\n");
412 for (auto &Feature : FeatTable)
413 printf(" %-*s - %s.\n", MaxFeatLen, Feature.Key, Feature.Desc);
416 printf("Use +feature to enable a feature, or -feature to disable it.\n"
417 "For example, rustc -C -target-cpu=mycpu -C "
418 "target-feature=+feature1,-feature2\n\n");

// Stub variants for LLVM builds that lack the subtarget tables
// (presumably selected by an #else of a version guard not visible here).
423 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
424 printf("Target CPU help is not supported by this LLVM version.\n\n");

427 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef) {
428 printf("Target features help is not supported by this LLVM version.\n\n");

// Returns the host CPU name; the length out-param store and return
// statement are not visible in this excerpt.
432 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
433 StringRef Name = sys::getHostCPUName();
// Creates a TargetMachine from Rust-side configuration: triple, CPU,
// feature string, ABI, code/reloc models, opt level, and assorted codegen
// flags. Returns the wrapped machine, or (presumably) null after setting
// the last-error string when the target lookup fails.
438 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
439 const char *TripleStr, const char *CPU, const char *Feature,
440 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocModel RustReloc,
441 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
442 bool PositionIndependentExecutable, bool FunctionSections,
444 bool TrapUnreachable,
447 bool EmitStackSizeSection,
448 bool RelaxELFRelocations) {
450 auto OptLevel = fromRust(RustOptLevel);
451 auto RM = fromRust(RustReloc);
// Normalize the triple and look up the backend; failure is reported back
// to Rust via LLVMRustSetLastError.
454 Triple Trip(Triple::normalize(TripleStr));
455 const llvm::Target *TheTarget =
456 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
457 if (TheTarget == nullptr) {
458 LLVMRustSetLastError(Error.c_str());
462 TargetOptions Options;
// Float ABI: default unless soft-float was explicitly requested
// (the conditional lines are not fully visible in this excerpt).
464 Options.FloatABIType = FloatABI::Default;
466 Options.FloatABIType = FloatABI::Soft;
468 Options.DataSections = DataSections;
469 Options.FunctionSections = FunctionSections;
470 Options.MCOptions.AsmVerbose = AsmComments;
471 Options.MCOptions.PreserveAsmComments = AsmComments;
472 Options.MCOptions.ABIName = ABIStr;
473 Options.RelaxELFRelocations = RelaxELFRelocations;
475 if (TrapUnreachable) {
476 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
477 // This limits the extent of possible undefined behavior in some cases, as
478 // it prevents control flow from "falling through" into whatever code
479 // happens to be laid out next in memory.
480 Options.TrapUnreachable = true;
484 Options.ThreadModel = ThreadModel::Single;
487 Options.EmitStackSizeSection = EmitStackSizeSection;
// The code model is optional; LLVMRustCodeModel::None means "let LLVM pick".
489 Optional<CodeModel::Model> CM;
490 if (RustCM != LLVMRustCodeModel::None)
491 CM = fromRust(RustCM);
492 TargetMachine *TM = TheTarget->createTargetMachine(
493 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
// Destroys a TargetMachine created by LLVMRustCreateTargetMachine
// (the `delete unwrap(TM);` body is not visible in this excerpt).
497 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
// Copies Rust-side optimization settings onto a legacy PassManagerBuilder,
// including optional PGO instrumentation/use paths (the null checks guarding
// the PGO assignments are not visible in this excerpt).
501 extern "C" void LLVMRustConfigurePassManagerBuilder(
502 LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
503 bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
504 const char* PGOGenPath, const char* PGOUsePath) {
505 unwrap(PMBR)->MergeFunctions = MergeFunctions;
506 unwrap(PMBR)->SLPVectorize = SLPVectorize;
507 unwrap(PMBR)->OptLevel = fromRust(OptLevel);
508 unwrap(PMBR)->LoopVectorize = LoopVectorize;
509 unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
513 unwrap(PMBR)->EnablePGOInstrGen = true;
514 unwrap(PMBR)->PGOInstrGen = PGOGenPath;
518 unwrap(PMBR)->PGOInstrUse = PGOUsePath;
522 // Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
523 // field of a PassManagerBuilder, we expose our own method of doing so.
// The PMB takes ownership of the heap-allocated TargetLibraryInfoImpl.
524 extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
526 bool DisableSimplifyLibCalls) {
527 Triple TargetTriple(unwrap(M)->getTargetTriple());
528 TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
529 if (DisableSimplifyLibCalls)
530 TLI->disableAllFunctions();
531 unwrap(PMBR)->LibraryInfo = TLI;

534 // Unfortunately, the LLVM C API doesn't provide a way to create the
535 // TargetLibraryInfo pass, so we use this method to do so.
// Here the wrapper pass copies TLII, so a stack instance suffices.
536 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
537 bool DisableSimplifyLibCalls) {
538 Triple TargetTriple(unwrap(M)->getTargetTriple());
539 TargetLibraryInfoImpl TLII(TargetTriple);
540 if (DisableSimplifyLibCalls)
541 TLII.disableAllFunctions();
542 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
545 // Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
546 // all the functions in a module, so we do that manually here. You'll find
547 // similar code in clang's BackendUtil.cpp file.
// Runs a legacy FunctionPassManager over every function definition in the
// module, after upgrading calls to deprecated intrinsics.
548 extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
550 llvm::legacy::FunctionPassManager *P =
551 unwrap<llvm::legacy::FunctionPassManager>(PMR);
552 P->doInitialization();
554 // Upgrade all calls to old intrinsics first.
555 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
556 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
// Then run the passes on each non-declaration function (the `P->run(*I)`
// and `doFinalization` lines are not visible in this excerpt).
558 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
560 if (!I->isDeclaration())
// Parses `-C llvm-args` style options into LLVM's global cl::opt state.
// Guarded so repeated calls don't re-run ParseCommandLineOptions (which LLVM
// forbids); the early-return on `Initialized` is not visible in this excerpt.
566 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
567 // Initializing the command-line options more than once is not allowed. So,
568 // check if they've already been initialized. (This could happen if we're
569 // being called from rustpkg, for example). If the arguments change, then
570 // that's just kinda unfortunate.
571 static bool Initialized = false;
575 cl::ParseCommandLineOptions(Argc, Argv);
// Output-file kind requested by the Rust side (assembly vs. object).
// LLVM 10 moved CodeGenFileType out of TargetMachine, hence two fromRust
// overload variants selected by version guard.
578 enum class LLVMRustFileType {

584 #if LLVM_VERSION_GE(10, 0)
585 static CodeGenFileType fromRust(LLVMRustFileType Type) {
587 case LLVMRustFileType::AssemblyFile:
588 return CGFT_AssemblyFile;
589 case LLVMRustFileType::ObjectFile:
590 return CGFT_ObjectFile;
592 report_fatal_error("Bad FileType.");

596 static TargetMachine::CodeGenFileType fromRust(LLVMRustFileType Type) {
598 case LLVMRustFileType::AssemblyFile:
599 return TargetMachine::CGFT_AssemblyFile;
600 case LLVMRustFileType::ObjectFile:
601 return TargetMachine::CGFT_ObjectFile;
603 report_fatal_error("Bad FileType.");
// Runs codegen for the module and writes the result (assembly or object
// code) to `Path`. On I/O failure the error message is forwarded to the
// Rust side and Failure is returned.
608 extern "C" LLVMRustResult
609 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
610 LLVMModuleRef M, const char *Path,
611 LLVMRustFileType RustFileType) {
612 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
613 auto FileType = fromRust(RustFileType);
615 std::string ErrorInfo;
617 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
619 ErrorInfo = EC.message();
620 if (ErrorInfo != "") {
621 LLVMRustSetLastError(ErrorInfo.c_str());
622 return LLVMRustResult::Failure;
// buffer_ostream defers writes until destruction, as required by
// addPassesToEmitFile's streaming contract.
625 buffer_ostream BOS(OS);
626 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
629 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
630 // stream (OS), so the only real safe place to delete this is here? Don't we
631 // wish this was written in Rust?
632 LLVMDisposePassManager(PMR);
633 return LLVMRustResult::Success;
// Callback signatures used to bridge LLVM's pass instrumentation into
// rustc's self-profiler (the void* is the Rust-side profiler object).
636 extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
637 const char*, // pass name
638 const char*); // IR name
639 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler

641 #if LLVM_VERSION_GE(9, 0)

// Extracts a human-readable name from the type-erased IR unit handed to
// pass-instrumentation callbacks (module, function, loop, or SCC).
643 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
644 if (any_isa<const Module *>(WrappedIr))
645 return any_cast<const Module *>(WrappedIr)->getName().str();
646 if (any_isa<const Function *>(WrappedIr))
647 return any_cast<const Function *>(WrappedIr)->getName().str();
648 if (any_isa<const Loop *>(WrappedIr))
649 return any_cast<const Loop *>(WrappedIr)->getName().str();
650 if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
651 return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
// Wires the Rust self-profiler into the new pass manager's instrumentation:
// before/after callbacks fire around every pass and analysis run.
656 void LLVMSelfProfileInitializeCallbacks(
657 PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
658 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
659 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
660 PIC.registerBeforePassCallback([LlvmSelfProfiler, BeforePassCallback](
661 StringRef Pass, llvm::Any Ir) {
662 std::string PassName = Pass.str();
663 std::string IrName = LLVMRustwrappedIrGetName(Ir);
664 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
668 PIC.registerAfterPassCallback(
669 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
670 AfterPassCallback(LlvmSelfProfiler);
// Invalidated passes also need their "after" event closed out.
673 PIC.registerAfterPassInvalidatedCallback(
674 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass) {
675 AfterPassCallback(LlvmSelfProfiler);
678 PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
679 StringRef Pass, llvm::Any Ir) {
680 std::string PassName = Pass.str();
681 std::string IrName = LLVMRustwrappedIrGetName(Ir);
682 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
685 PIC.registerAfterAnalysisCallback(
686 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
687 AfterPassCallback(LlvmSelfProfiler);
// Which LTO stage the optimization pipeline is being built for
// (enumerators not visible in this excerpt).
692 enum class LLVMRustOptStage {

// Sanitizer configuration passed from Rust; a null pointer means
// "no sanitizers". Additional bool fields are not visible in this excerpt.
700 struct LLVMRustSanitizerOptions {
703 bool SanitizeAddress;
704 bool SanitizeRecover;
705 int SanitizeMemoryTrackOrigins;
// Drives a full new-pass-manager optimization run over a module: sets up
// analysis managers, instrumentation, optional PGO and sanitizer passes,
// then builds and runs the pipeline appropriate for the requested LTO stage.
// Only compiled for LLVM >= 9; otherwise aborts with a fatal error.
// NOTE(review): several lines (closing braces, push-back call sites, #else/
// #endif markers) appear to be missing from this excerpt.
709 LLVMRustOptimizeWithNewPassManager(
710 LLVMModuleRef ModuleRef,
711 LLVMTargetMachineRef TMRef,
712 LLVMRustPassBuilderOptLevel OptLevelRust,
713 LLVMRustOptStage OptStage,
714 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
715 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
716 bool DisableSimplifyLibCalls,
717 LLVMRustSanitizerOptions *SanitizerOptions,
718 const char *PGOGenPath, const char *PGOUsePath,
719 void* LlvmSelfProfiler,
720 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
721 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
722 #if LLVM_VERSION_GE(9, 0)
723 Module *TheModule = unwrap(ModuleRef);
724 TargetMachine *TM = unwrap(TMRef);
725 PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
727 // FIXME: MergeFunctions is not supported by NewPM yet.
728 (void) MergeFunctions;
// Tuning knobs shared by all pipelines built below.
730 PipelineTuningOptions PTO;
731 PTO.LoopUnrolling = UnrollLoops;
732 PTO.LoopInterleaving = UnrollLoops;
733 PTO.LoopVectorization = LoopVectorize;
734 PTO.SLPVectorization = SLPVectorize;
736 PassInstrumentationCallbacks PIC;
737 StandardInstrumentations SI;
738 SI.registerCallbacks(PIC);
740 if (LlvmSelfProfiler){
741 LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
// Optional PGO configuration: generate or use a profile, never both.
744 Optional<PGOOptions> PGOOpt;
747 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr);
748 } else if (PGOUsePath) {
750 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse);
753 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
755 // FIXME: We may want to expose this as an option.
756 bool DebugPassManager = false;
757 LoopAnalysisManager LAM(DebugPassManager);
758 FunctionAnalysisManager FAM(DebugPassManager);
759 CGSCCAnalysisManager CGAM(DebugPassManager);
760 ModuleAnalysisManager MAM(DebugPassManager);
762 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
// Library-call info honors -Zno-builtins via DisableSimplifyLibCalls.
764 Triple TargetTriple(TheModule->getTargetTriple());
765 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
766 if (DisableSimplifyLibCalls)
767 TLII->disableAllFunctions();
768 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
770 PB.registerModuleAnalyses(MAM);
771 PB.registerCGSCCAnalyses(CGAM);
772 PB.registerFunctionAnalyses(FAM);
773 PB.registerLoopAnalyses(LAM);
774 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
776 // We manually collect pipeline callbacks so we can apply them at O0, where the
777 // PassBuilder does not create a pipeline.
778 std::vector<std::function<void(ModulePassManager &)>> PipelineStartEPCallbacks;
779 std::vector<std::function<void(FunctionPassManager &, PassBuilder::OptimizationLevel)>>
780 OptimizerLastEPCallbacks;
783 PipelineStartEPCallbacks.push_back([VerifyIR](ModulePassManager &MPM) {
784 MPM.addPass(VerifierPass());
// Queue sanitizer passes. LLVM 10 can run them at pipeline start; LLVM 9
// must attach them at the optimizer-last extension point instead.
788 if (SanitizerOptions) {
789 if (SanitizerOptions->SanitizeMemory) {
790 MemorySanitizerOptions Options(
791 SanitizerOptions->SanitizeMemoryTrackOrigins,
792 SanitizerOptions->SanitizeRecover,
793 /*CompileKernel=*/false);
794 #if LLVM_VERSION_GE(10, 0)
795 PipelineStartEPCallbacks.push_back([Options](ModulePassManager &MPM) {
796 MPM.addPass(MemorySanitizerPass(Options));
799 OptimizerLastEPCallbacks.push_back(
800 [Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
801 FPM.addPass(MemorySanitizerPass(Options));
806 if (SanitizerOptions->SanitizeThread) {
807 #if LLVM_VERSION_GE(10, 0)
808 PipelineStartEPCallbacks.push_back([](ModulePassManager &MPM) {
809 MPM.addPass(ThreadSanitizerPass());
812 OptimizerLastEPCallbacks.push_back(
813 [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
814 FPM.addPass(ThreadSanitizerPass());
819 if (SanitizerOptions->SanitizeAddress) {
820 PipelineStartEPCallbacks.push_back([&](ModulePassManager &MPM) {
821 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
823 OptimizerLastEPCallbacks.push_back(
824 [SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
825 FPM.addPass(AddressSanitizerPass(
826 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover,
827 /*UseAfterScope=*/true));
830 PipelineStartEPCallbacks.push_back(
831 [SanitizerOptions](ModulePassManager &MPM) {
832 MPM.addPass(ModuleAddressSanitizerPass(
833 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover));
// Build the pipeline. At O0 we apply the queued callbacks by hand and add
// only the always-inliner (plus O0 PGO on LLVM 10+); otherwise we register
// them with the PassBuilder and use its default pipelines per LTO stage.
839 ModulePassManager MPM(DebugPassManager);
840 if (!NoPrepopulatePasses) {
841 if (OptLevel == PassBuilder::O0) {
842 for (const auto &C : PipelineStartEPCallbacks)
845 if (!OptimizerLastEPCallbacks.empty()) {
846 FunctionPassManager FPM(DebugPassManager);
847 for (const auto &C : OptimizerLastEPCallbacks)
849 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
852 MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
854 #if LLVM_VERSION_GE(10, 0)
856 PB.addPGOInstrPassesForO0(
857 MPM, DebugPassManager, PGOOpt->Action == PGOOptions::IRInstr,
858 /*IsCS=*/false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
862 for (const auto &C : PipelineStartEPCallbacks)
863 PB.registerPipelineStartEPCallback(C);
864 if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
865 for (const auto &C : OptimizerLastEPCallbacks)
866 PB.registerOptimizerLastEPCallback(C);
870 case LLVMRustOptStage::PreLinkNoLTO:
871 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
873 case LLVMRustOptStage::PreLinkThinLTO:
874 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
// ThinLTO pre-link has no optimizer-last hook, so append the queued
// function passes to the end of the module pipeline manually.
875 if (!OptimizerLastEPCallbacks.empty()) {
876 FunctionPassManager FPM(DebugPassManager);
877 for (const auto &C : OptimizerLastEPCallbacks)
879 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
882 case LLVMRustOptStage::PreLinkFatLTO:
883 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
885 case LLVMRustOptStage::ThinLTO:
886 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
887 // It only seems to be needed for C++ specific optimizations.
888 MPM = PB.buildThinLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
890 case LLVMRustOptStage::FatLTO:
891 MPM = PB.buildLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
// ThinLTO buffers require canonical aliases and named anonymous globals.
897 if (UseThinLTOBuffers) {
898 MPM.addPass(CanonicalizeAliasesPass());
899 MPM.addPass(NameAnonGlobalPass());
902 // Upgrade all calls to old intrinsics first.
903 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
904 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
906 MPM.run(*TheModule, MAM);
908 // The new pass manager has been available for a long time,
909 // but we don't bother supporting it on old LLVM versions.
910 report_fatal_error("New pass manager only supported since LLVM 9");
914 // Callback to demangle function name
916 // * name to be demangled
919 // * output buffer len
920 // Returns len of demangled string, or 0 if demangle failed.
921 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);

// AssemblyAnnotationWriter that prefixes functions and call/invoke sites in
// printed LLVM IR with their demangled Rust names as `; ...` comments.
926 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
928 std::vector<char> Buf;
931 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
933 // Return empty string if demangle failed
934 // or if name does not need to be demangled
935 StringRef CallDemangle(StringRef name) {
940 if (Buf.size() < name.size() * 2) {
941 // Demangled name usually shorter than mangled,
942 // but allocate twice as much memory just in case
943 Buf.resize(name.size() * 2);
946 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
952 auto Demangled = StringRef(Buf.data(), R);
953 if (Demangled == name) {
954 // Do not print anything if demangled name is equal to mangled.
// Emits the demangled function name above the function definition.
961 void emitFunctionAnnot(const Function *F,
962 formatted_raw_ostream &OS) override {
963 StringRef Demangled = CallDemangle(F->getName());
964 if (Demangled.empty()) {
968 OS << "; " << Demangled << "\n";
// Emits the demangled callee name above call/invoke instructions.
971 void emitInstructionAnnot(const Instruction *I,
972 formatted_raw_ostream &OS) override {
975 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
977 Value = CI->getCalledValue();
978 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
980 Value = II->getCalledValue();
982 // Could demangle more operations, e. g.
983 // `store %place, @function`.
987 if (!Value->hasName()) {
991 StringRef Demangled = CallDemangle(Value->getName());
992 if (Demangled.empty()) {
996 OS << "; " << Name << " " << Demangled << "\n";
// Prints the module's IR to `Path`, annotated with demangled names via
// RustAssemblyAnnotationWriter. Reports I/O errors through the last-error
// mechanism.
1002 extern "C" LLVMRustResult
1003 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
1004 std::string ErrorInfo;
1006 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
1008 ErrorInfo = EC.message();
1009 if (ErrorInfo != "") {
1010 LLVMRustSetLastError(ErrorInfo.c_str());
1011 return LLVMRustResult::Failure;
1014 RustAssemblyAnnotationWriter AAW(Demangle);
1015 formatted_raw_ostream FOS(OS);
1016 unwrap(M)->print(FOS, &AAW);
1018 return LLVMRustResult::Success;
// Implements `rustc -C passes=list`: enumerates every registered pass and
// prints its argument name and description.
1021 extern "C" void LLVMRustPrintPasses() {
1022 LLVMInitializePasses();
1023 struct MyListener : PassRegistrationListener {
1024 void passEnumerate(const PassInfo *Info) {
1025 StringRef PassArg = Info->getPassArgument();
1026 StringRef PassName = Info->getPassName();
1027 if (!PassArg.empty()) {
1028 // These unsigned->signed casts could theoretically overflow, but
1029 // realistically never will (and even if, the result is implementation
1030 // defined rather than plain UB).
1031 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
1032 (int)PassName.size(), PassName.data());
1037 PassRegistry *PR = PassRegistry::getPassRegistry();
1038 PR->enumerateWith(&Listener);
// Installs the always-inliner as the PMB's inliner, optionally inserting
// lifetime intrinsics around inlined allocas.
1041 extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
1042 bool AddLifetimes) {
1043 unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);

// Internalizes everything in the module except the `Len` symbols listed in
// `Symbols` (used to restrict the exported surface before LTO).
1046 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
1048 llvm::legacy::PassManager passes;
1050 auto PreserveFunctions = [=](const GlobalValue &GV) {
1051 for (size_t I = 0; I < Len; I++) {
1052 if (GV.getName() == Symbols[I]) {
1059 passes.add(llvm::createInternalizePass(PreserveFunctions));
1061 passes.run(*unwrap(M));
// Marks every function in the module nounwind, and also clears the
// may-throw flag on each invoke instruction inside function bodies
// (used when unwinding is disabled, e.g. panic=abort).
1064 extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
1065 for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
1067 GV->setDoesNotThrow();
1068 Function *F = dyn_cast<Function>(GV);
1072 for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
1073 for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
1074 if (isa<InvokeInst>(I)) {
1075 InvokeInst *CI = cast<InvokeInst>(I);
1076 CI->setDoesNotThrow();
// Copies the target machine's data layout string onto the module so IR-level
// and codegen-level layout agree.
1084 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1085 LLVMTargetMachineRef TMR) {
1086 TargetMachine *Target = unwrap(TMR);
1087 unwrap(Module)->setDataLayout(Target->createDataLayout());

// Tags the module as position-independent code (big PIC model).
1090 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1091 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);

// Tags the module as a position-independent executable (large PIE model).
1094 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1095 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1098 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1099 // right now. This ThinLTO support is only enabled on "recent ish" versions of
1100 // LLVM, and otherwise it's just blanket rejected from other compilers.
1102 // Most of this implementation is straight copied from LLVM. At the time of
1103 // this writing it wasn't *quite* suitable to reuse more code from upstream
1104 // for our purposes, but we should strive to upstream this support once it's
1105 // ready to go! I figure we may want a bit of testing locally first before
1106 // sending this upstream to LLVM. I hear though they're quite eager to receive
1107 // feedback like this!
1109 // If you're reading this code and wondering "what in the world" or you're
1110 // working "good lord my LLVM upgrade is *still* failing due to these bindings"
1111 // then fear not! (ok maybe fear a little). All code here is mostly based
1112 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1114 // You'll find that the general layout here roughly corresponds to the `run`
1115 // method in that file as well as `ProcessThinLTOModule`. Functions are
1116 // specifically commented below as well, but if you're updating this code
1117 // or otherwise trying to understand it, the LLVM source will be useful in
1118 // interpreting the mysteries within.
1120 // Otherwise I'll apologize in advance, it probably requires a relatively
1121 // significant investment on your part to "truly understand" what's going on
1122 // here. Not saying I do myself, but it took me awhile staring at LLVM's source
1123 // and various online resources about ThinLTO to make heads or tails of all
// This is a shared data structure which *must* be threadsafe to share
// read-only amongst threads. This also corresponds basically to the arguments
// of the `ProcessThinLTOModule` function in the LLVM source.
struct LLVMRustThinLTOData {
  // The combined index that is the global analysis over all modules we're
  // performing ThinLTO for. This is mostly managed by LLVM.
  ModuleSummaryIndex Index;

  // All modules we may look at, stored as in-memory serialized versions. This
  // is later used when inlining to ensure we can extract any module to inline
  // Keyed by module identifier; the buffers are non-owning views.
  StringMap<MemoryBufferRef> ModuleMap;

  // A set that we manage of everything we *don't* want internalized. Note that
  // this includes all transitive references right now as well, but it may not
  DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;

  // Not 100% sure what these are, but they impact what's internalized and
  // what's inlined across modules, I believe.
  // NOTE(review): per LLVM's ThinLTOCodeGenerator, Import/ExportLists hold the
  // per-module cross-module import plan computed by ComputeCrossModuleImport,
  // and ModuleToDefinedGVSummaries maps each module to summaries of the
  // globals it defines — confirm against lib/LTO/ThinLTOCodeGenerator.cpp.
  StringMap<FunctionImporter::ImportMapTy> ImportLists;
  StringMap<FunctionImporter::ExportSetTy> ExportLists;
  StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;

  // HaveGVs = false: the index is summary-only, with no IR-level
  // GlobalValue pointers attached.
  LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
// Just an argument to the `LLVMRustCreateThinLTOData` function below.
// One serialized codegen unit handed over from rustc.
struct LLVMRustThinLTOModule {
  // Module identifier (also used as the key into `ModuleMap`).
  const char *identifier;
// This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
//
// From the list of summaries for one GUID, picks the definition the linker
// would keep: prefer a "strong" definition (neither available_externally nor
// weak-for-linker); otherwise fall back to the first non-available_externally
// one.
static const GlobalValueSummary *
getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
  auto StrongDefForLinker = llvm::find_if(
      GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
        auto Linkage = Summary->linkage();
        // Strong = not available_externally and not weak-for-linker.
        return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
               !GlobalValue::isWeakForLinker(Linkage);
  if (StrongDefForLinker != GVSummaryList.end())
    return StrongDefForLinker->get();

  // No strong definition: take the first real (non-available_externally) one.
  auto FirstDefForLinker = llvm::find_if(
      GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
        auto Linkage = Summary->linkage();
        return !GlobalValue::isAvailableExternallyLinkage(Linkage);
  if (FirstDefForLinker == GVSummaryList.end())
  return FirstDefForLinker->get();
// The main entry point for creating the global ThinLTO analysis. The structure
// here is basically the same as before threads are spawned in the `run`
// function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
//
// Returns an owned `LLVMRustThinLTOData` (caller frees via
// `LLVMRustFreeThinLTOData`), or signals failure through
// `LLVMRustSetLastError`.
extern "C" LLVMRustThinLTOData*
LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
                          const char **preserved_symbols,
#if LLVM_VERSION_GE(10, 0)
  auto Ret = std::make_unique<LLVMRustThinLTOData>();
  auto Ret = llvm::make_unique<LLVMRustThinLTOData>();

  // Load each module's summary and merge it into one combined index
  for (int i = 0; i < num_modules; i++) {
    auto module = &modules[i];
    StringRef buffer(module->data, module->len);
    MemoryBufferRef mem_buffer(buffer, module->identifier);

    // Keep a handle to every module's raw bytes for later lazy loading
    // during function import.
    Ret->ModuleMap[module->identifier] = mem_buffer;

    if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
      LLVMRustSetLastError(toString(std::move(Err)).c_str());

  // Collect for each module the list of function it defines (GUID -> Summary)
  Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);

  // Convert the preserved symbols set from string to GUID, this is then needed
  // for internalization.
  for (int i = 0; i < num_symbols; i++) {
    auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
    Ret->GUIDPreservedSymbols.insert(GUID);

  // Collect the import/export lists for all modules from the call-graph in the
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
  auto deadIsPrevailing = [&](GlobalValue::GUID G) {
    // `Unknown` = we make no prevailing-copy claim for dead symbols.
    return PrevailingType::Unknown;
  // We don't have a complete picture in our use of ThinLTO, just our immediate
  // crate, so we need `ImportEnabled = false` to limit internalization.
  // Otherwise, we sometimes lose `static` values -- see #60184.
  computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
                                  deadIsPrevailing, /* ImportEnabled = */ false);
  ComputeCrossModuleImport(
    Ret->ModuleToDefinedGVSummaries,

  // Resolve LinkOnce/Weak symbols, this has to be computed early because it
  // impacts the caching.
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
  // being lifted from `lib/LTO/LTO.cpp` as well
  StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
  DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
  for (auto &I : Ret->Index) {
    // Only GUIDs with multiple summaries need a prevailing copy chosen.
    if (I.second.SummaryList.size() > 1)
      PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
  auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
    const auto &Prevailing = PrevailingCopy.find(GUID);
    if (Prevailing == PrevailingCopy.end())
    return Prevailing->second == S;
  auto recordNewLinkage = [&](StringRef ModuleIdentifier,
                              GlobalValue::GUID GUID,
                              GlobalValue::LinkageTypes NewLinkage) {
    ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
#if LLVM_VERSION_GE(9, 0)
  thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
                                  Ret->GUIDPreservedSymbols);
  thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage);

  // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
  // callback below. This callback below will dictate the linkage for all
  // summaries in the index, and we basically just only want to ensure that dead
  // symbols are internalized. Otherwise everything that's already external
  // linkage will stay as external, and internal will stay as internal.
  std::set<GlobalValue::GUID> ExportedGUIDs;
  for (auto &List : Ret->Index) {
    for (auto &GVS: List.second.SummaryList) {
      // Locals are never exported; skip them.
      if (GlobalValue::isLocalLinkage(GVS->linkage()))
      auto GUID = GVS->getOriginalName();
      // Live (non-dead) symbols stay exported.
      if (GVS->flags().Live)
        ExportedGUIDs.insert(GUID);
#if LLVM_VERSION_GE(10, 0)
  // LLVM >= 10 keys export lists by `ValueInfo` rather than raw GUIDs.
  auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
    const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
    return (ExportList != Ret->ExportLists.end() &&
            ExportList->second.count(VI)) ||
           ExportedGUIDs.count(VI.getGUID());
  thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
  auto isExported = [&](StringRef ModuleIdentifier, GlobalValue::GUID GUID) {
    const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
    return (ExportList != Ret->ExportLists.end() &&
            ExportList->second.count(GUID)) ||
           ExportedGUIDs.count(GUID);
  thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported);

  // Ownership passes to the caller (freed by `LLVMRustFreeThinLTOData`).
  return Ret.release();
// Frees the analysis produced by `LLVMRustCreateThinLTOData`.
LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
1310 // Below are the various passes that happen *per module* when doing ThinLTO.
1312 // In other words, these are the functions that are all run concurrently
1313 // with one another, one per module. The passes here correspond to the analysis
1314 // passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
1315 // `ProcessThinLTOModule` function. Here they're split up into separate steps
1316 // so rustc can save off the intermediate bytecode between each step.
// Step 1 of per-module ThinLTO: promote/rename locals so they can be
// referenced across modules. On failure, records the error via
// `LLVMRustSetLastError`.
LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  if (renameModuleForThinLTO(Mod, Data->Index)) {
    LLVMRustSetLastError("renameModuleForThinLTO failed");
// Step 2 of per-module ThinLTO: apply the weak/linkonce linkage resolutions
// recorded in the combined index to this module's definitions.
LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  // Summaries of globals this module defines, computed during data creation.
  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
  thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
// Step 3 of per-module ThinLTO: internalize symbols the combined analysis
// decided are not exported from this module.
LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  // Summaries of globals this module defines, computed during data creation.
  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
  thinLTOInternalizeModule(Mod, DefinedGlobals);
// Step 4 of per-module ThinLTO: pull in function bodies from other modules
// according to this module's import list, lazily deserializing source
// modules from `Data->ModuleMap` on demand.
LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);

  const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
  // Lazily materializes the module to import from, given its identifier.
  auto Loader = [&](StringRef Identifier) {
    const auto &Memory = Data->ModuleMap.lookup(Identifier);
    auto &Context = Mod.getContext();
    auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);

    // The rest of this closure is a workaround for
    // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
    // we accidentally import wasm custom sections into different modules,
    // duplicating them in the final output artifact.
    //
    // The issue is worked around here by manually removing the
    // `wasm.custom_sections` named metadata node from any imported module. This
    // we know isn't used by any optimization pass so there's no need for it to
    //
    // Note that the metadata is currently lazily loaded, so we materialize it
    // here before looking up if there's metadata inside. The `FunctionImporter`
    // will immediately materialize metadata anyway after an import, so this
    // shouldn't be a perf hit.
    if (Error Err = (*MOrErr)->materializeMetadata()) {
      Expected<std::unique_ptr<Module>> Ret(std::move(Err));

    auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
    if (WasmCustomSections)
      WasmCustomSections->eraseFromParent();

  FunctionImporter Importer(Data->Index, Loader);
  Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
    LLVMRustSetLastError(toString(Result.takeError()).c_str());
// C callback type invoked once per (importing module, imported module) pair
// by `LLVMRustGetThinLTOModuleImports` below.
extern "C" typedef void (*LLVMRustModuleNameCallback)(void*, // payload
                                                      const char*, // importing module name
                                                      const char*); // imported module name
// Calls `module_name_callback` for each module import done by ThinLTO.
// The callback is provided with regular null-terminated C strings.
LLVMRustGetThinLTOModuleImports(const LLVMRustThinLTOData *data,
                                LLVMRustModuleNameCallback module_name_callback,
                                void* callback_payload) {
  for (const auto& importing_module : data->ImportLists) {
    // Copy into std::string so we can hand out NUL-terminated pointers.
    const std::string importing_module_id = importing_module.getKey().str();
    const auto& imports = importing_module.getValue();
    for (const auto& imported_module : imports) {
      const std::string imported_module_id = imported_module.getKey().str();
      module_name_callback(callback_payload,
                           importing_module_id.c_str(),
                           imported_module_id.c_str());
// This struct and various functions are sort of a hack right now, but the
// problem is that we've got in-memory LLVM modules after we generate and
// optimize all codegen-units for one compilation in rustc. To be compatible
// with the LTO support above we need to serialize the modules plus their
// ThinLTO summary into memory.
//
// This structure is basically an owned version of a serialized module, with
// a ThinLTO summary attached.
struct LLVMRustThinLTOBuffer {
// Serializes `M` as ThinLTO bitcode (module + summary) into an owned buffer.
// Caller frees via `LLVMRustThinLTOBufferFree`.
extern "C" LLVMRustThinLTOBuffer*
LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
#if LLVM_VERSION_GE(10, 0)
  auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
  auto Ret = llvm::make_unique<LLVMRustThinLTOBuffer>();
    // Stream the bitcode straight into the buffer's backing string.
    raw_string_ostream OS(Ret->data);
      legacy::PassManager PM;
      PM.add(createWriteThinLTOBitcodePass(OS));
  // Ownership passes to the caller.
  return Ret.release();
// Frees a buffer returned by `LLVMRustThinLTOBufferCreate`.
LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
// Returns a pointer to the serialized bitcode held by the buffer
// (length available via `LLVMRustThinLTOBufferLen`).
extern "C" const void*
LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
  return Buffer->data.data();
// Returns the length in bytes of the data exposed by `LLVMRustThinLTOBufferPtr`.
LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
  return Buffer->data.length();
// This is what we used to parse upstream bitcode for actual ThinLTO
// processing. We'll call this once per module optimized through ThinLTO, and
// it'll be called concurrently on many threads.
//
// Returns a wrapped Module on success; on failure records the error via
// `LLVMRustSetLastError`.
extern "C" LLVMModuleRef
LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
                           const char *identifier) {
  StringRef Data(data, len);
  MemoryBufferRef Buffer(Data, identifier);
  // Required so debug-info type identifiers merge correctly when multiple
  // modules are parsed into one context.
  unwrap(Context)->enableDebugTypeODRUniquing();
  Expected<std::unique_ptr<Module>> SrcOrError =
      parseBitcodeFile(Buffer, *unwrap(Context));
    LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
  // Transfer ownership of the parsed module to the caller.
  return wrap(std::move(*SrcOrError).release());
// Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
// the comment in `back/lto.rs` for why this exists.
//
// NOTE(review): despite the copied comment above, this function appears to
// *read* compile units — walking `debug_compile_units()` and writing results
// through the two out-pointers (`Cur`/`Next`) — confirm intended semantics
// against the caller in `back/lto.rs`.
LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
                                DICompileUnit **B) {
  Module *M = unwrap(Mod);
  DICompileUnit **Cur = A;
  DICompileUnit **Next = B;
  for (DICompileUnit *CU : M->debug_compile_units()) {
// Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
// the comment in `back/lto.rs` for why this exists.
LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
  Module *M = unwrap(Mod);

  // If the original source module didn't have a `DICompileUnit` then try to
  // merge all the existing compile units. If there aren't actually any though
  // then there's not much for us to do so return.
  if (Unit == nullptr) {
    for (DICompileUnit *CU : M->debug_compile_units()) {
    if (Unit == nullptr)

  // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
  // process it recursively. Note that we specifically iterate over instructions
  // to ensure we feed everything into it.
  DebugInfoFinder Finder;
  Finder.processModule(*M);
  for (Function &F : M->functions()) {
    for (auto &FI : F) {
      for (Instruction &BI : FI) {
        // Feed debug locations and debug intrinsics (dbg.value/dbg.declare)
        // into the finder so it discovers every referenced subprogram.
        if (auto Loc = BI.getDebugLoc())
          Finder.processLocation(*M, Loc);
        if (auto DVI = dyn_cast<DbgValueInst>(&BI))
          Finder.processValue(*M, DVI);
        if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
          Finder.processDeclare(*M, DDI);

  // After we've found all our debuginfo, rewrite all subprograms to point to
  // the same `DICompileUnit`.
  for (auto &F : Finder.subprograms()) {
    F->replaceUnit(Unit);

  // Erase any other references to other `DICompileUnit` instances, the verifier
  // will later ensure that we don't actually have any other stale references to
  auto *MD = M->getNamedMetadata("llvm.dbg.cu");
  MD->clearOperands();
  MD->addOperand(Unit);