8 #include "llvm/Analysis/TargetLibraryInfo.h"
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/CodeGen/TargetSubtargetInfo.h"
11 #include "llvm/InitializePasses.h"
12 #include "llvm/IR/AutoUpgrade.h"
13 #include "llvm/IR/AssemblyAnnotationWriter.h"
14 #include "llvm/IR/IntrinsicInst.h"
15 #include "llvm/IR/Verifier.h"
16 #include "llvm/Passes/PassBuilder.h"
17 #if LLVM_VERSION_GE(9, 0)
18 #include "llvm/Passes/StandardInstrumentations.h"
20 #include "llvm/Support/CBindingWrapping.h"
21 #include "llvm/Support/FileSystem.h"
22 #include "llvm/Support/Host.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
25 #include "llvm/Transforms/IPO/AlwaysInliner.h"
26 #include "llvm/Transforms/IPO/FunctionImport.h"
27 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
28 #include "llvm/LTO/LTO.h"
29 #include "llvm-c/Transforms/PassManagerBuilder.h"
31 #include "llvm/Transforms/Instrumentation.h"
32 #if LLVM_VERSION_GE(9, 0)
33 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
34 #include "llvm/Support/TimeProfiler.h"
36 #if LLVM_VERSION_GE(8, 0)
37 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
38 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
40 #if LLVM_VERSION_GE(9, 0)
41 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
43 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
// Opaque C handle types plus wrap()/unwrap() conversion helpers. These are the
// FFI boundary types used by every extern "C" entry point in this file.
47 typedef struct LLVMOpaquePass *LLVMPassRef;
48 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
50 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
51 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
52 DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBuilder,
53 LLVMPassManagerBuilderRef)
// Registers all of LLVM's standard pass groups with the global PassRegistry so
// that passes can later be looked up by name (see LLVMRustFindAndCreatePass)
// and enumerated (see LLVMRustPrintPasses).
55 extern "C" void LLVMInitializePasses() {
56 PassRegistry &Registry = *PassRegistry::getPassRegistry();
57 initializeCore(Registry);
58 initializeCodeGen(Registry);
59 initializeScalarOpts(Registry);
60 initializeVectorization(Registry);
61 initializeIPO(Registry);
62 initializeAnalysis(Registry);
63 initializeTransformUtils(Registry);
64 initializeInstCombine(Registry);
65 initializeInstrumentation(Registry);
66 initializeTarget(Registry);
// Starts LLVM's -ftime-trace style profiler. A no-op on LLVM < 9, which lacks
// the TimeProfiler support header.
69 extern "C" void LLVMTimeTraceProfilerInitialize() {
70 #if LLVM_VERSION_GE(9, 0)
71 timeTraceProfilerInitialize();
// Writes the accumulated time-trace profile to `FileName` (created/truncated)
// and tears the profiler down. A no-op on LLVM < 9.
75 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
76 #if LLVM_VERSION_GE(9, 0)
77 StringRef FN(FileName);
79 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
81 timeTraceProfilerWrite(OS);
82 timeTraceProfilerCleanup();
// Mirror of LLVM's PassKind exposed across the FFI boundary, plus the
// conversion from LLVM's PassKind. Anything that is neither a function nor a
// module pass is reported as Other.
86 enum class LLVMRustPassKind {
92 static LLVMRustPassKind toRust(PassKind Kind) {
95 return LLVMRustPassKind::Function;
97 return LLVMRustPassKind::Module;
99 return LLVMRustPassKind::Other;
// Looks up a pass by its registered name in the PassRegistry and instantiates
// it. NOTE(review): the null-check path for a missing PassInfo is not visible
// in this listing — presumably nullptr is returned; confirm against the file.
103 extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
104 StringRef SR(PassName);
105 PassRegistry *PR = PassRegistry::getPassRegistry();
107 const PassInfo *PI = PR->getPassInfo(SR);
109 return wrap(PI->createPass());
// Creates the function-level ASan pass. Kernel ASan is never used by rustc,
// and use-after-scope checking is always enabled.
114 extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
115 const bool CompileKernel = false;
116 const bool UseAfterScope = true;
118 return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
// Creates the module-level ASan pass; the factory function was renamed in
// LLVM 9, hence the version split.
121 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
122 const bool CompileKernel = false;
124 #if LLVM_VERSION_GE(9, 0)
125 return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
127 return wrap(createAddressSanitizerModulePass(CompileKernel, Recover));
// Creates the MSan pass. The constructor signature changed twice upstream:
// LLVM 9 takes a MemorySanitizerOptions struct, LLVM 8 the raw arguments, and
// older versions use the pre-"Legacy" factory name.
131 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
132 #if LLVM_VERSION_GE(9, 0)
133 const bool CompileKernel = false;
135 return wrap(createMemorySanitizerLegacyPassPass(
136 MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
137 #elif LLVM_VERSION_GE(8, 0)
138 return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover));
140 return wrap(createMemorySanitizerPass(TrackOrigins, Recover));
// Creates the TSan pass, accounting for the LLVM 8 "Legacy" factory rename.
144 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
145 #if LLVM_VERSION_GE(8, 0)
146 return wrap(createThreadSanitizerLegacyPassPass());
148 return wrap(createThreadSanitizerPass());
// Reports the kind (function/module/other) of a previously created pass.
152 extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
154 Pass *Pass = unwrap(RustPass);
155 return toRust(Pass->getPassKind());
// Adds a pass to a legacy pass manager; ownership of the pass transfers to
// the pass manager (PM.add takes ownership in the legacy PM API).
158 extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
160 Pass *Pass = unwrap(RustPass);
161 PassManagerBase *PMB = unwrap(PMR);
// Thin wrapper exposing PassManagerBuilder::populateThinLTOPassManager, which
// the stock LLVM C API does not surface.
166 void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
167 LLVMPassManagerBuilderRef PMBR,
168 LLVMPassManagerRef PMR
170 unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
// Schedules caller-supplied passes at the end of the optimization pipeline.
// The lambda captures the raw Passes pointer by value; the caller must keep
// that array alive until the builder runs (extension callbacks fire later).
174 void LLVMRustAddLastExtensionPasses(
175 LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
176 auto AddExtensionPasses = [Passes, NumPasses](
177 const PassManagerBuilder &Builder, PassManagerBase &PM) {
178 for (size_t I = 0; I < NumPasses; I++) {
179 PM.add(unwrap(Passes[I]));
182 // Add the passes to both of the pre-finalization extension points,
183 // so they are run for optimized and non-optimized builds.
184 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
186 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
// For each backend compiled into this LLVM, define SUBTARGET_<ARCH> to expand
// the SUBTARGET(x) macro; otherwise define it to nothing. GEN_SUBTARGETS then
// instantiates SUBTARGET once per enabled backend, declaring the extern
// per-target feature/CPU tables used by LLVMRustHasFeature and the printers.
190 #ifdef LLVM_COMPONENT_X86
191 #define SUBTARGET_X86 SUBTARGET(X86)
193 #define SUBTARGET_X86
196 #ifdef LLVM_COMPONENT_ARM
197 #define SUBTARGET_ARM SUBTARGET(ARM)
199 #define SUBTARGET_ARM
202 #ifdef LLVM_COMPONENT_AARCH64
203 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
205 #define SUBTARGET_AARCH64
208 #ifdef LLVM_COMPONENT_MIPS
209 #define SUBTARGET_MIPS SUBTARGET(Mips)
211 #define SUBTARGET_MIPS
214 #ifdef LLVM_COMPONENT_POWERPC
215 #define SUBTARGET_PPC SUBTARGET(PPC)
217 #define SUBTARGET_PPC
220 #ifdef LLVM_COMPONENT_SYSTEMZ
221 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
223 #define SUBTARGET_SYSTEMZ
226 #ifdef LLVM_COMPONENT_MSP430
227 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
229 #define SUBTARGET_MSP430
232 #ifdef LLVM_COMPONENT_RISCV
233 #define SUBTARGET_RISCV SUBTARGET(RISCV)
235 #define SUBTARGET_RISCV
238 #ifdef LLVM_COMPONENT_SPARC
239 #define SUBTARGET_SPARC SUBTARGET(Sparc)
241 #define SUBTARGET_SPARC
244 #ifdef LLVM_COMPONENT_HEXAGON
245 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
247 #define SUBTARGET_HEXAGON
250 #define GEN_SUBTARGETS \
262 #define SUBTARGET(x) \
264 extern const SubtargetFeatureKV x##FeatureKV[]; \
265 extern const SubtargetFeatureKV x##SubTypeKV[]; \
// Returns true when the target machine's subtarget enables `Feature`
// (queried as "+<feature>" via MCSubtargetInfo::checkFeatures).
271 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
272 const char *Feature) {
273 TargetMachine *Target = unwrap(TM);
274 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
275 return MCInfo->checkFeatures(std::string("+") + Feature);
// FFI mirror of LLVM's CodeModel::Model, plus the conversion into LLVM's
// enum. Unrecognized values abort via report_fatal_error.
278 enum class LLVMRustCodeModel {
287 static CodeModel::Model fromRust(LLVMRustCodeModel Model) {
289 case LLVMRustCodeModel::Small:
290 return CodeModel::Small;
291 case LLVMRustCodeModel::Kernel:
292 return CodeModel::Kernel;
293 case LLVMRustCodeModel::Medium:
294 return CodeModel::Medium;
295 case LLVMRustCodeModel::Large:
296 return CodeModel::Large;
298 report_fatal_error("Bad CodeModel.");
// FFI mirror of CodeGenOpt::Level and its converter; aborts on bad input.
302 enum class LLVMRustCodeGenOptLevel {
310 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
312 case LLVMRustCodeGenOptLevel::None:
313 return CodeGenOpt::None;
314 case LLVMRustCodeGenOptLevel::Less:
315 return CodeGenOpt::Less;
316 case LLVMRustCodeGenOptLevel::Default:
317 return CodeGenOpt::Default;
318 case LLVMRustCodeGenOptLevel::Aggressive:
319 return CodeGenOpt::Aggressive;
321 report_fatal_error("Bad CodeGenOptLevel.");
// FFI mirror of the new pass manager's PassBuilder::OptimizationLevel
// (O0..O3 plus size levels Os/Oz); aborts on bad input.
325 enum class LLVMRustPassBuilderOptLevel {
334 static PassBuilder::OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
336 case LLVMRustPassBuilderOptLevel::O0:
337 return PassBuilder::O0;
338 case LLVMRustPassBuilderOptLevel::O1:
339 return PassBuilder::O1;
340 case LLVMRustPassBuilderOptLevel::O2:
341 return PassBuilder::O2;
342 case LLVMRustPassBuilderOptLevel::O3:
343 return PassBuilder::O3;
344 case LLVMRustPassBuilderOptLevel::Os:
345 return PassBuilder::Os;
346 case LLVMRustPassBuilderOptLevel::Oz:
347 return PassBuilder::Oz;
349 report_fatal_error("Bad PassBuilderOptLevel.");
// FFI mirror of Reloc::Model. Returns Optional<> because Default means "let
// LLVM pick" (None); other variants map to concrete relocation models.
353 enum class LLVMRustRelocMode {
363 static Optional<Reloc::Model> fromRust(LLVMRustRelocMode RustReloc) {
365 case LLVMRustRelocMode::Default:
367 case LLVMRustRelocMode::Static:
368 return Reloc::Static;
369 case LLVMRustRelocMode::PIC:
371 case LLVMRustRelocMode::DynamicNoPic:
372 return Reloc::DynamicNoPIC;
373 case LLVMRustRelocMode::ROPI:
375 case LLVMRustRelocMode::RWPI:
377 case LLVMRustRelocMode::ROPIRWPI:
378 return Reloc::ROPI_RWPI;
380 report_fatal_error("Bad RelocModel.");
384 /// getLongestEntryLength - Return the length of the longest entry in the table.
385 /// Used below to compute the column width when pretty-printing CPU/feature
385 /// tables. KV must expose a C-string `Key` member.
385 template<typename KV>
386 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
388 for (auto &I : Table)
389 MaxLen = std::max(MaxLen, std::strlen(I.Key));
// Prints the CPU table for `TM`'s target to stdout (backs `rustc --print
// target-cpus`). Adds a "native" entry only when the host and target
// architectures match, since "native" is meaningless when cross-compiling.
393 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
394 const TargetMachine *Target = unwrap(TM);
395 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
396 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
397 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
398 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
399 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
401 printf("Available CPUs for this target:\n");
402 if (HostArch == TargetArch) {
403 const StringRef HostCPU = sys::getHostCPUName();
404 printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
405 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
407 for (auto &CPU : CPUTable)
408 printf(" %-*s\n", MaxCPULen, CPU.Key);
// Prints the feature table for `TM`'s target to stdout (backs `rustc --print
// target-features`), followed by usage hints.
412 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef TM) {
413 const TargetMachine *Target = unwrap(TM);
414 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
415 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
416 unsigned MaxFeatLen = getLongestEntryLength(FeatTable);
418 printf("Available features for this target:\n");
419 for (auto &Feature : FeatTable)
420 printf(" %-*s - %s.\n", MaxFeatLen, Feature.Key, Feature.Desc);
423 printf("Use +feature to enable a feature, or -feature to disable it.\n"
424 "For example, rustc -C -target-cpu=mycpu -C "
425 "target-feature=+feature1,-feature2\n\n");
// Fallback stubs compiled when the LLVM version lacks the CPU/feature table
// APIs used above; they print an apology instead of the tables.
430 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
431 printf("Target CPU help is not supported by this LLVM version.\n\n");
434 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef) {
435 printf("Target features help is not supported by this LLVM version.\n\n");
// Returns the host CPU name as a (pointer, length-out-param) pair. The
// returned pointer references LLVM-owned storage — the caller must not free it.
439 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
440 StringRef Name = sys::getHostCPUName();
// Constructs a TargetMachine from rustc's codegen settings: triple, CPU,
// feature string, ABI, code model, relocation model, opt level, and assorted
// boolean knobs. Returns the wrapped machine, or sets the last-error string
// and (presumably) returns null when the triple is unknown.
445 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
446 const char *TripleStr, const char *CPU, const char *Feature,
447 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocMode RustReloc,
448 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
449 bool PositionIndependentExecutable, bool FunctionSections,
451 bool TrapUnreachable,
454 bool EmitStackSizeSection,
455 bool RelaxELFRelocations) {
457 auto OptLevel = fromRust(RustOptLevel);
458 auto RM = fromRust(RustReloc);
461 Triple Trip(Triple::normalize(TripleStr));
462 const llvm::Target *TheTarget =
463 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
464 if (TheTarget == nullptr) {
465 LLVMRustSetLastError(Error.c_str());
// Translate the boolean knobs into TargetOptions fields.
469 TargetOptions Options;
471 Options.FloatABIType = FloatABI::Default;
473 Options.FloatABIType = FloatABI::Soft;
475 Options.DataSections = DataSections;
476 Options.FunctionSections = FunctionSections;
477 Options.MCOptions.AsmVerbose = AsmComments;
478 Options.MCOptions.PreserveAsmComments = AsmComments;
479 Options.MCOptions.ABIName = ABIStr;
480 Options.RelaxELFRelocations = RelaxELFRelocations;
482 if (TrapUnreachable) {
483 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
484 // This limits the extent of possible undefined behavior in some cases, as
485 // it prevents control flow from "falling through" into whatever code
486 // happens to be laid out next in memory.
487 Options.TrapUnreachable = true;
491 Options.ThreadModel = ThreadModel::Single;
494 Options.EmitStackSizeSection = EmitStackSizeSection;
// None means "let LLVM choose" a code model; any other variant is mapped.
496 Optional<CodeModel::Model> CM;
497 if (RustCM != LLVMRustCodeModel::None)
498 CM = fromRust(RustCM);
499 TargetMachine *TM = TheTarget->createTargetMachine(
500 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
// Destroys a TargetMachine previously created by LLVMRustCreateTargetMachine.
504 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
// Copies rustc's optimization settings onto a legacy PassManagerBuilder,
// including PGO instrumentation/use profile paths when provided.
508 extern "C" void LLVMRustConfigurePassManagerBuilder(
509 LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
510 bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
511 const char* PGOGenPath, const char* PGOUsePath) {
512 unwrap(PMBR)->MergeFunctions = MergeFunctions;
513 unwrap(PMBR)->SLPVectorize = SLPVectorize;
514 unwrap(PMBR)->OptLevel = fromRust(OptLevel);
515 unwrap(PMBR)->LoopVectorize = LoopVectorize;
516 unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
520 unwrap(PMBR)->EnablePGOInstrGen = true;
521 unwrap(PMBR)->PGOInstrGen = PGOGenPath;
525 unwrap(PMBR)->PGOInstrUse = PGOUsePath;
529 // Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
530 // field of a PassManagerBuilder, we expose our own method of doing so.
530 // Ownership of the heap-allocated TargetLibraryInfoImpl passes to the
530 // builder. DisableSimplifyLibCalls turns off all libcall recognition.
531 extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
533 bool DisableSimplifyLibCalls) {
534 Triple TargetTriple(unwrap(M)->getTargetTriple());
535 TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
536 if (DisableSimplifyLibCalls)
537 TLI->disableAllFunctions();
538 unwrap(PMBR)->LibraryInfo = TLI;
541 // Unfortunately, the LLVM C API doesn't provide a way to create the
542 // TargetLibraryInfo pass, so we use this method to do so.
542 // The wrapper pass copies TLII, so the stack-allocated impl is fine here.
543 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
544 bool DisableSimplifyLibCalls) {
545 Triple TargetTriple(unwrap(M)->getTargetTriple());
546 TargetLibraryInfoImpl TLII(TargetTriple);
547 if (DisableSimplifyLibCalls)
548 TLII.disableAllFunctions();
549 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
552 // Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
553 // all the functions in a module, so we do that manually here. You'll find
554 // similar code in clang's BackendUtil.cpp file.
554 // Runs a legacy FunctionPassManager over every function definition in `M`,
554 // first upgrading stale intrinsic calls so passes see current IR.
555 extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
557 llvm::legacy::FunctionPassManager *P =
558 unwrap<llvm::legacy::FunctionPassManager>(PMR);
559 P->doInitialization();
561 // Upgrade all calls to old intrinsics first.
562 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
563 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
565 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
567 if (!I->isDeclaration())
// Parses `-C llvm-args` style flags into LLVM's global cl::opt state. Guarded
// so it runs at most once per process, since re-parsing is disallowed.
573 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
574 // Initializing the command-line options more than once is not allowed. So,
575 // check if they've already been initialized. (This could happen if we're
576 // being called from rustpkg, for example). If the arguments change, then
577 // that's just kinda unfortunate.
578 static bool Initialized = false;
582 cl::ParseCommandLineOptions(Argc, Argv);
// FFI mirror of the emit-file type (assembly vs. object). LLVM 10 moved
// CodeGenFileType out of TargetMachine into the top-level namespace, hence
// the two converter variants.
585 enum class LLVMRustFileType {
591 #if LLVM_VERSION_GE(10, 0)
592 static CodeGenFileType fromRust(LLVMRustFileType Type) {
594 case LLVMRustFileType::AssemblyFile:
595 return CGFT_AssemblyFile;
596 case LLVMRustFileType::ObjectFile:
597 return CGFT_ObjectFile;
599 report_fatal_error("Bad FileType.");
603 static TargetMachine::CodeGenFileType fromRust(LLVMRustFileType Type) {
605 case LLVMRustFileType::AssemblyFile:
606 return TargetMachine::CGFT_AssemblyFile;
607 case LLVMRustFileType::ObjectFile:
608 return TargetMachine::CGFT_ObjectFile;
610 report_fatal_error("Bad FileType.");
// Runs codegen for module `M` and writes assembly or an object file to
// `Path`. On stream-open failure, records the error and returns Failure.
// Note this consumes (disposes) the pass manager — see the comment below.
615 extern "C" LLVMRustResult
616 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
617 LLVMModuleRef M, const char *Path,
618 LLVMRustFileType RustFileType) {
619 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
620 auto FileType = fromRust(RustFileType);
622 std::string ErrorInfo;
624 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
626 ErrorInfo = EC.message();
627 if (ErrorInfo != "") {
628 LLVMRustSetLastError(ErrorInfo.c_str());
629 return LLVMRustResult::Failure;
632 buffer_ostream BOS(OS);
633 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
636 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
637 // stream (OS), so the only real safe place to delete this is here? Don't we
638 // wish this was written in Rust?
639 LLVMDisposePassManager(PMR);
640 return LLVMRustResult::Success;
// Function-pointer types for routing new-PM pass events into rustc's
// self-profiler. The void* is the opaque Rust-side profiler handle.
643 extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
644 const char*, // pass name
645 const char*); // IR name
646 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
648 #if LLVM_VERSION_GE(9, 0)
650 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
651 if (any_isa<const Module *>(WrappedIr))
652 return any_cast<const Module *>(WrappedIr)->getName().str();
653 if (any_isa<const Function *>(WrappedIr))
654 return any_cast<const Function *>(WrappedIr)->getName().str();
655 if (any_isa<const Loop *>(WrappedIr))
656 return any_cast<const Loop *>(WrappedIr)->getName().str();
657 if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
658 return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
// Hooks rustc's self-profiler into the new pass manager's instrumentation:
// before/after callbacks for both passes and analyses. The lambdas capture
// the raw profiler pointer; it must outlive the pass pipeline run.
663 void LLVMSelfProfileInitializeCallbacks(
664 PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
665 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
666 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
667 PIC.registerBeforePassCallback([LlvmSelfProfiler, BeforePassCallback](
668 StringRef Pass, llvm::Any Ir) {
669 std::string PassName = Pass.str();
670 std::string IrName = LLVMRustwrappedIrGetName(Ir);
671 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
675 PIC.registerAfterPassCallback(
676 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
677 AfterPassCallback(LlvmSelfProfiler);
680 PIC.registerAfterPassInvalidatedCallback(
681 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass) {
682 AfterPassCallback(LlvmSelfProfiler);
685 PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
686 StringRef Pass, llvm::Any Ir) {
687 std::string PassName = Pass.str();
688 std::string IrName = LLVMRustwrappedIrGetName(Ir);
689 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
692 PIC.registerAfterAnalysisCallback(
693 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
694 AfterPassCallback(LlvmSelfProfiler);
// Which LTO stage the optimization pipeline is being built for, plus the
// sanitizer configuration passed from the Rust side (field layout must match
// the corresponding #[repr(C)] struct in rustc).
699 enum class LLVMRustOptStage {
707 struct LLVMRustSanitizerOptions {
710 bool SanitizeAddress;
711 bool SanitizeRecover;
712 int SanitizeMemoryTrackOrigins;
// Runs the new pass manager over `ModuleRef` according to rustc's settings:
// builds tuning options, analysis managers, PGO options, sanitizer callbacks,
// and the per-OptStage default pipeline, then executes it. Requires LLVM 9+;
// older versions abort with report_fatal_error.
716 LLVMRustOptimizeWithNewPassManager(
717 LLVMModuleRef ModuleRef,
718 LLVMTargetMachineRef TMRef,
719 LLVMRustPassBuilderOptLevel OptLevelRust,
720 LLVMRustOptStage OptStage,
721 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
722 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
723 bool DisableSimplifyLibCalls,
724 LLVMRustSanitizerOptions *SanitizerOptions,
725 const char *PGOGenPath, const char *PGOUsePath,
726 void* LlvmSelfProfiler,
727 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
728 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
729 #if LLVM_VERSION_GE(9, 0)
730 Module *TheModule = unwrap(ModuleRef);
731 TargetMachine *TM = unwrap(TMRef);
732 PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
734 // FIXME: MergeFunctions is not supported by NewPM yet.
735 (void) MergeFunctions;
737 PipelineTuningOptions PTO;
738 PTO.LoopUnrolling = UnrollLoops;
739 PTO.LoopInterleaving = UnrollLoops;
740 PTO.LoopVectorization = LoopVectorize;
741 PTO.SLPVectorization = SLPVectorize;
743 PassInstrumentationCallbacks PIC;
744 StandardInstrumentations SI;
745 SI.registerCallbacks(PIC);
747 if (LlvmSelfProfiler){
748 LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
// PGO: generation and use are mutually configured from the Rust side.
751 Optional<PGOOptions> PGOOpt;
754 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr);
755 } else if (PGOUsePath) {
757 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse);
760 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
762 // FIXME: We may want to expose this as an option.
763 bool DebugPassManager = false;
764 LoopAnalysisManager LAM(DebugPassManager);
765 FunctionAnalysisManager FAM(DebugPassManager);
766 CGSCCAnalysisManager CGAM(DebugPassManager);
767 ModuleAnalysisManager MAM(DebugPassManager);
769 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
// Library-call recognition mirrors LLVMRustAddLibraryInfo above.
771 Triple TargetTriple(TheModule->getTargetTriple());
772 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
773 if (DisableSimplifyLibCalls)
774 TLII->disableAllFunctions();
775 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
777 PB.registerModuleAnalyses(MAM);
778 PB.registerCGSCCAnalyses(CGAM);
779 PB.registerFunctionAnalyses(FAM);
780 PB.registerLoopAnalyses(LAM);
781 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
783 // We manually collect pipeline callbacks so we can apply them at O0, where the
784 // PassBuilder does not create a pipeline.
785 std::vector<std::function<void(ModulePassManager &)>> PipelineStartEPCallbacks;
786 std::vector<std::function<void(FunctionPassManager &, PassBuilder::OptimizationLevel)>>
787 OptimizerLastEPCallbacks;
790 PipelineStartEPCallbacks.push_back([VerifyIR](ModulePassManager &MPM) {
791 MPM.addPass(VerifierPass());
// Sanitizer passes: registered at pipeline start on LLVM 10+, at optimizer
// end on LLVM 9 (the pre-10 module-level variants differ — see each branch).
795 if (SanitizerOptions) {
796 if (SanitizerOptions->SanitizeMemory) {
797 MemorySanitizerOptions Options(
798 SanitizerOptions->SanitizeMemoryTrackOrigins,
799 SanitizerOptions->SanitizeRecover,
800 /*CompileKernel=*/false);
801 #if LLVM_VERSION_GE(10, 0)
802 PipelineStartEPCallbacks.push_back([Options](ModulePassManager &MPM) {
803 MPM.addPass(MemorySanitizerPass(Options));
806 OptimizerLastEPCallbacks.push_back(
807 [Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
808 FPM.addPass(MemorySanitizerPass(Options));
813 if (SanitizerOptions->SanitizeThread) {
814 #if LLVM_VERSION_GE(10, 0)
815 PipelineStartEPCallbacks.push_back([](ModulePassManager &MPM) {
816 MPM.addPass(ThreadSanitizerPass());
819 OptimizerLastEPCallbacks.push_back(
820 [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
821 FPM.addPass(ThreadSanitizerPass());
826 if (SanitizerOptions->SanitizeAddress) {
827 PipelineStartEPCallbacks.push_back([&](ModulePassManager &MPM) {
828 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
830 OptimizerLastEPCallbacks.push_back(
831 [SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
832 FPM.addPass(AddressSanitizerPass(
833 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover,
834 /*UseAfterScope=*/true));
837 PipelineStartEPCallbacks.push_back(
838 [SanitizerOptions](ModulePassManager &MPM) {
839 MPM.addPass(ModuleAddressSanitizerPass(
840 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover));
// Pipeline construction: at O0 the callbacks are applied by hand (plus
// always-inline and, on LLVM 10+, the O0 PGO passes); at higher levels the
// callbacks are registered and PassBuilder builds the stage-appropriate
// default pipeline.
846 ModulePassManager MPM(DebugPassManager);
847 if (!NoPrepopulatePasses) {
848 if (OptLevel == PassBuilder::O0) {
849 for (const auto &C : PipelineStartEPCallbacks)
852 if (!OptimizerLastEPCallbacks.empty()) {
853 FunctionPassManager FPM(DebugPassManager);
854 for (const auto &C : OptimizerLastEPCallbacks)
856 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
859 MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
861 #if LLVM_VERSION_GE(10, 0)
863 PB.addPGOInstrPassesForO0(
864 MPM, DebugPassManager, PGOOpt->Action == PGOOptions::IRInstr,
865 /*IsCS=*/false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
869 for (const auto &C : PipelineStartEPCallbacks)
870 PB.registerPipelineStartEPCallback(C);
871 for (const auto &C : OptimizerLastEPCallbacks)
872 PB.registerOptimizerLastEPCallback(C);
875 case LLVMRustOptStage::PreLinkNoLTO:
876 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
878 case LLVMRustOptStage::PreLinkThinLTO:
879 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
881 case LLVMRustOptStage::PreLinkFatLTO:
882 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
884 case LLVMRustOptStage::ThinLTO:
885 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
886 // It only seems to be needed for C++ specific optimizations.
887 MPM = PB.buildThinLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
889 case LLVMRustOptStage::FatLTO:
890 MPM = PB.buildLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
// ThinLTO serialization requires named globals and no aliases.
896 if (UseThinLTOBuffers) {
897 MPM.addPass(CanonicalizeAliasesPass());
898 MPM.addPass(NameAnonGlobalPass());
901 // Upgrade all calls to old intrinsics first.
902 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
903 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
905 MPM.run(*TheModule, MAM);
907 // The new pass manager has been available for a long time,
908 // but we don't bother supporting it on old LLVM versions.
909 report_fatal_error("New pass manager only supported since LLVM 9");
913 // Callback to demangle function name
915 // * name to be demangled
918 // * output buffer len
919 // Returns len of demangled string, or 0 if demangle failed.
919 // (Provided by the Rust side; used by RustAssemblyAnnotationWriter below.)
920 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
// AssemblyAnnotationWriter that prints demangled Rust symbol names as `;`
// comments above function definitions and next to call/invoke instructions
// when the module is pretty-printed (see LLVMRustPrintModule).
925 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
927 std::vector<char> Buf;
930 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
932 // Return empty string if demangle failed
933 // or if name does not need to be demangled
934 StringRef CallDemangle(StringRef name) {
939 if (Buf.size() < name.size() * 2) {
940 // Demangled name usually shorter than mangled,
941 // but allocate twice as much memory just in case
942 Buf.resize(name.size() * 2);
945 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
951 auto Demangled = StringRef(Buf.data(), R);
952 if (Demangled == name) {
953 // Do not print anything if demangled name is equal to mangled.
// Annotates a function definition with its demangled name.
960 void emitFunctionAnnot(const Function *F,
961 formatted_raw_ostream &OS) override {
962 StringRef Demangled = CallDemangle(F->getName());
963 if (Demangled.empty()) {
967 OS << "; " << Demangled << "\n";
// Annotates call/invoke instructions with the callee's demangled name.
970 void emitInstructionAnnot(const Instruction *I,
971 formatted_raw_ostream &OS) override {
974 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
976 Value = CI->getCalledValue();
977 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
979 Value = II->getCalledValue();
981 // Could demangle more operations, e. g.
982 // `store %place, @function`.
986 if (!Value->hasName()) {
990 StringRef Demangled = CallDemangle(Value->getName());
991 if (Demangled.empty()) {
995 OS << "; " << Name << " " << Demangled << "\n";
// Pretty-prints module `M` to `Path` with demangled-name annotations (backs
// `--emit=llvm-ir`). Returns Failure and records the message if the output
// file cannot be opened.
1001 extern "C" LLVMRustResult
1002 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
1003 std::string ErrorInfo;
1005 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
1007 ErrorInfo = EC.message();
1008 if (ErrorInfo != "") {
1009 LLVMRustSetLastError(ErrorInfo.c_str());
1010 return LLVMRustResult::Failure;
1013 RustAssemblyAnnotationWriter AAW(Demangle);
1014 formatted_raw_ostream FOS(OS);
1015 unwrap(M)->print(FOS, &AAW);
1017 return LLVMRustResult::Success;
// Prints every registered pass (argument + human-readable name) to stdout
// via a PassRegistrationListener, initializing the registry first.
1020 extern "C" void LLVMRustPrintPasses() {
1021 LLVMInitializePasses();
1022 struct MyListener : PassRegistrationListener {
1023 void passEnumerate(const PassInfo *Info) {
1024 StringRef PassArg = Info->getPassArgument();
1025 StringRef PassName = Info->getPassName();
1026 if (!PassArg.empty()) {
1027 // These unsigned->signed casts could theoretically overflow, but
1028 // realistically never will (and even if, the result is implementation
1029 // defined rather than plain UB).
1030 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
1031 (int)PassName.size(), PassName.data());
1036 PassRegistry *PR = PassRegistry::getPassRegistry();
1037 PR->enumerateWith(&Listener);
// Sets the always-inliner as the builder's inliner, optionally inserting
// lifetime intrinsics for inlined allocas.
1040 extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
1041 bool AddLifetimes) {
1042 unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
// Internalizes every global except the `Len` symbol names listed in
// `Symbols`, by running the Internalize pass with a preservation predicate.
// Note the predicate is O(Len) per global — fine for typical export lists.
1045 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
1047 llvm::legacy::PassManager passes;
1049 auto PreserveFunctions = [=](const GlobalValue &GV) {
1050 for (size_t I = 0; I < Len; I++) {
1051 if (GV.getName() == Symbols[I]) {
1058 passes.add(llvm::createInternalizePass(PreserveFunctions));
1060 passes.run(*unwrap(M));
// Marks every function in the module nounwind, and additionally marks each
// invoke instruction as non-throwing, so downstream passes can simplify EH.
1063 extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
1064 for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
1066 GV->setDoesNotThrow();
1067 Function *F = dyn_cast<Function>(GV);
1071 for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
1072 for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
1073 if (isa<InvokeInst>(I)) {
1074 InvokeInst *CI = cast<InvokeInst>(I);
1075 CI->setDoesNotThrow();
// Stamps the target machine's data layout onto the module so IR-level passes
// agree with codegen about type sizes and alignment.
1083 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1084 LLVMTargetMachineRef TMR) {
1085 TargetMachine *Target = unwrap(TMR);
1086 unwrap(Module)->setDataLayout(Target->createDataLayout());
// Records "big" PIC level module metadata (affects codegen of globals/calls).
1089 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1090 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
// Records "large" PIE level module metadata for position-independent exes.
1093 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1094 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1097 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1098 // right now. This ThinLTO support is only enabled on "recent ish" versions of
1099 // LLVM, and otherwise it's just blanket rejected from other compilers.
1101 // Most of this implementation is straight copied from LLVM. At the time of
1102 // this writing it wasn't *quite* suitable to reuse more code from upstream
1103 // for our purposes, but we should strive to upstream this support once it's
1104 // ready to go! I figure we may want a bit of testing locally first before
1105 // sending this upstream to LLVM. I hear though they're quite eager to receive
1106 // feedback like this!
1108 // If you're reading this code and wondering "what in the world" or you're
1109 // working "good lord my LLVM upgrade is *still* failing due to these bindings"
1110 // then fear not! (ok maybe fear a little). All code here is mostly based
1111 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1113 // You'll find that the general layout here roughly corresponds to the `run`
1114 // method in that file as well as `ProcessThinLTOModule`. Functions are
1115 // specifically commented below as well, but if you're updating this code
1116 // or otherwise trying to understand it, the LLVM source will be useful in
1117 // interpreting the mysteries within.
1119 // Otherwise I'll apologize in advance, it probably requires a relatively
1120 // significant investment on your part to "truly understand" what's going on
1121 // here. Not saying I do myself, but it took me awhile staring at LLVM's source
1122 // and various online resources about ThinLTO to make heads or tails of all
1125 // This is a shared data structure which *must* be threadsafe to share
1126 // read-only amongst threads. This also corresponds basically to the arguments
1127 // of the `ProcessThinLTOModule` function in the LLVM source.
1128 struct LLVMRustThinLTOData {
1129 // The combined index that is the global analysis over all modules we're
1130 // performing ThinLTO for. This is mostly managed by LLVM.
1131 ModuleSummaryIndex Index;
1133 // All modules we may look at, stored as in-memory serialized versions. This
1134 // is later used when inlining to ensure we can extract any module to inline
1136 StringMap<MemoryBufferRef> ModuleMap;
1138 // A set that we manage of everything we *don't* want internalized. Note that
1139 // this includes all transitive references right now as well, but it may not
1141 DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
1143 // Not 100% sure what these are, but they impact what's internalized and
1144 // what's inlined across modules, I believe.
1145 StringMap<FunctionImporter::ImportMapTy> ImportLists;
1146 StringMap<FunctionImporter::ExportSetTy> ExportLists;
1147 StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
1148 // HaveGVs = false: the index is built from summaries only, without IR
1148 // GlobalValue pointers attached.
1149 LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
// Just an argument to the `LLVMRustCreateThinLTOData` function below.
//
// Describes one serialized module: a name used for diagnostics and as the
// `ModuleMap` key, plus a pointer/length pair for its in-memory bitcode
// (see the `StringRef buffer(module->data, module->len)` use below).
struct LLVMRustThinLTOModule {
  const char *identifier;
  const char *data;
  size_t len;
};
1159 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
1161 static const GlobalValueSummary *
1162 getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
1163 auto StrongDefForLinker = llvm::find_if(
1164 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1165 auto Linkage = Summary->linkage();
1166 return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
1167 !GlobalValue::isWeakForLinker(Linkage);
1169 if (StrongDefForLinker != GVSummaryList.end())
1170 return StrongDefForLinker->get();
1172 auto FirstDefForLinker = llvm::find_if(
1173 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1174 auto Linkage = Summary->linkage();
1175 return !GlobalValue::isAvailableExternallyLinkage(Linkage);
1177 if (FirstDefForLinker == GVSummaryList.end())
1179 return FirstDefForLinker->get();
// The main entry point for creating the global ThinLTO analysis. The structure
// here is basically the same as before threads are spawned in the `run`
// function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
//
// Returns an owned `LLVMRustThinLTOData*` (released later via
// `LLVMRustFreeThinLTOData`), or null with the rust "last error" set when a
// module summary fails to parse.
extern "C" LLVMRustThinLTOData*
LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
                          const char **preserved_symbols,
#if LLVM_VERSION_GE(10, 0)
  auto Ret = std::make_unique<LLVMRustThinLTOData>();
  auto Ret = llvm::make_unique<LLVMRustThinLTOData>();

  // Load each module's summary and merge it into one combined index
  for (int i = 0; i < num_modules; i++) {
    auto module = &modules[i];
    StringRef buffer(module->data, module->len);
    MemoryBufferRef mem_buffer(buffer, module->identifier);

    // Keep the serialized module around; the import step later deserializes
    // entries out of this map on demand.
    Ret->ModuleMap[module->identifier] = mem_buffer;

    if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
      LLVMRustSetLastError(toString(std::move(Err)).c_str());

  // Collect for each module the list of function it defines (GUID -> Summary)
  Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);

  // Convert the preserved symbols set from string to GUID, this is then needed
  // for internalization.
  for (int i = 0; i < num_symbols; i++) {
    auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
    Ret->GUIDPreservedSymbols.insert(GUID);

  // Collect the import/export lists for all modules from the call-graph in the
  // combined index.
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
  //
  // `Unknown` means no dead symbol is claimed as prevailing here; the real
  // prevailing-copy decision is made below via `PrevailingCopy`.
  auto deadIsPrevailing = [&](GlobalValue::GUID G) {
    return PrevailingType::Unknown;
#if LLVM_VERSION_GE(8, 0)
  // We don't have a complete picture in our use of ThinLTO, just our immediate
  // crate, so we need `ImportEnabled = false` to limit internalization.
  // Otherwise, we sometimes lose `static` values -- see #60184.
  computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
                                  deadIsPrevailing, /* ImportEnabled = */ false);
  computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols, deadIsPrevailing);
  ComputeCrossModuleImport(
      Ret->ModuleToDefinedGVSummaries,

  // Resolve LinkOnce/Weak symbols, this has to be computed early because it
  // impacts the caching.
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
  // being lifted from `lib/LTO/LTO.cpp` as well
  StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
  // For every GUID with more than one summary, record which copy "prevails"
  // so that linkage resolution below is deterministic across modules.
  DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
  for (auto &I : Ret->Index) {
    if (I.second.SummaryList.size() > 1)
      PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
  auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
    const auto &Prevailing = PrevailingCopy.find(GUID);
    // A GUID with a single summary has no entry in `PrevailingCopy` and
    // trivially prevails.
    if (Prevailing == PrevailingCopy.end())
    return Prevailing->second == S;
  auto recordNewLinkage = [&](StringRef ModuleIdentifier,
                              GlobalValue::GUID GUID,
                              GlobalValue::LinkageTypes NewLinkage) {
    ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
#if LLVM_VERSION_GE(9, 0)
  thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
                                  Ret->GUIDPreservedSymbols);
#elif LLVM_VERSION_GE(8, 0)
  thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage);
  thinLTOResolveWeakForLinkerInIndex(Ret->Index, isPrevailing, recordNewLinkage);

  // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
  // callback below. This callback below will dictate the linkage for all
  // summaries in the index, and we basically just only want to ensure that dead
  // symbols are internalized. Otherwise everything that's already external
  // linkage will stay as external, and internal will stay as internal.
  std::set<GlobalValue::GUID> ExportedGUIDs;
  for (auto &List : Ret->Index) {
    for (auto &GVS: List.second.SummaryList) {
      // Skip symbols that already have local linkage.
      if (GlobalValue::isLocalLinkage(GVS->linkage()))
      auto GUID = GVS->getOriginalName();
      // Only symbols still live after the dead-symbol computation above stay
      // exported.
      if (GVS->flags().Live)
        ExportedGUIDs.insert(GUID);
#if LLVM_VERSION_GE(10, 0)
  // LLVM 10+ keys per-module export lists by `ValueInfo` rather than a plain
  // GUID, hence the two variants of `isExported` here.
  auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
    const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
    return (ExportList != Ret->ExportLists.end() &&
            ExportList->second.count(VI)) ||
           ExportedGUIDs.count(VI.getGUID());
  thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
  auto isExported = [&](StringRef ModuleIdentifier, GlobalValue::GUID GUID) {
    const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
    return (ExportList != Ret->ExportLists.end() &&
            ExportList->second.count(GUID)) ||
           ExportedGUIDs.count(GUID);
  thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported);

  return Ret.release();
LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) { // releases the analysis built by `LLVMRustCreateThinLTOData`

// Below are the various passes that happen *per module* when doing ThinLTO.
//
// In other words, these are the functions that are all run concurrently
// with one another, one per module. The passes here correspond to the analysis
// passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
// `ProcessThinLTOModule` function. Here they're split up into separate steps
// so rustc can save off the intermediate bytecode between each step.
// Per-module step: run LLVM's `renameModuleForThinLTO` over the module with
// the combined index. On failure a message is stored in the rust "last error"
// slot.
LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  if (renameModuleForThinLTO(Mod, Data->Index)) {
    LLVMRustSetLastError("renameModuleForThinLTO failed");
// Per-module step: apply the weak/linkonce linkage decisions recorded in the
// combined index to this module's defined globals.
LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
#if LLVM_VERSION_GE(8, 0)
  // LLVM 8 renamed `thinLTOResolveWeakForLinkerModule` to
  // `thinLTOResolvePrevailingInModule`; the two branches are the same call
  // under different names.
  thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
  thinLTOResolveWeakForLinkerModule(Mod, DefinedGlobals);
// Per-module step: internalize everything that the global analysis decided
// doesn't need to be visible outside this module.
LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);
  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
  thinLTOInternalizeModule(Mod, DefinedGlobals);
// Per-module step: import functions from other modules into this one,
// according to the import list computed in `LLVMRustCreateThinLTOData`.
LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
  Module &Mod = *unwrap(M);

  const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
  // Deserializes a module out of `ModuleMap` on demand when the
  // `FunctionImporter` asks for it.
  auto Loader = [&](StringRef Identifier) {
    const auto &Memory = Data->ModuleMap.lookup(Identifier);
    auto &Context = Mod.getContext();
    auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);

    // The rest of this closure is a workaround for
    // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
    // we accidentally import wasm custom sections into different modules,
    // duplicating them in the final output artifact.
    //
    // The issue is worked around here by manually removing the
    // `wasm.custom_sections` named metadata node from any imported module. This
    // we know isn't used by any optimization pass so there's no need for it to
    // be imported.
    //
    // Note that the metadata is currently lazily loaded, so we materialize it
    // here before looking up if there's metadata inside. The `FunctionImporter`
    // will immediately materialize metadata anyway after an import, so this
    // shouldn't be a perf hit.
    if (Error Err = (*MOrErr)->materializeMetadata()) {
      Expected<std::unique_ptr<Module>> Ret(std::move(Err));

    auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
    if (WasmCustomSections)
      WasmCustomSections->eraseFromParent();

  FunctionImporter Importer(Data->Index, Loader);
  Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
    // Import failed: surface the error through the rust "last error" slot.
    LLVMRustSetLastError(toString(Result.takeError()).c_str());
// Callback type used by `LLVMRustGetThinLTOModuleImports` below: invoked once
// per (importing module, imported module) edge.
extern "C" typedef void (*LLVMRustModuleNameCallback)(void*, // payload
                                                      const char*, // importing module name
                                                      const char*); // imported module name
1404 // Calls `module_name_callback` for each module import done by ThinLTO.
1405 // The callback is provided with regular null-terminated C strings.
1407 LLVMRustGetThinLTOModuleImports(const LLVMRustThinLTOData *data,
1408 LLVMRustModuleNameCallback module_name_callback,
1409 void* callback_payload) {
1410 for (const auto& importing_module : data->ImportLists) {
1411 const std::string importing_module_id = importing_module.getKey().str();
1412 const auto& imports = importing_module.getValue();
1413 for (const auto& imported_module : imports) {
1414 const std::string imported_module_id = imported_module.getKey().str();
1415 module_name_callback(callback_payload,
1416 importing_module_id.c_str(),
1417 imported_module_id.c_str());
// This struct and various functions are sort of a hack right now, but the
// problem is that we've got in-memory LLVM modules after we generate and
// optimize all codegen-units for one compilation in rustc. To be compatible
// with the LTO support above we need to serialize the modules plus their
// ThinLTO summary into memory.
//
// This structure is basically an owned version of a serialized module, with
// a ThinLTO summary attached.
struct LLVMRustThinLTOBuffer {
  // The raw bitcode bytes (summary included) written by
  // `createWriteThinLTOBitcodePass` in `LLVMRustThinLTOBufferCreate` below.
  std::string data;
};
1434 extern "C" LLVMRustThinLTOBuffer*
1435 LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
1436 #if LLVM_VERSION_GE(10, 0)
1437 auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
1439 auto Ret = llvm::make_unique<LLVMRustThinLTOBuffer>();
1442 raw_string_ostream OS(Ret->data);
1444 legacy::PassManager PM;
1445 PM.add(createWriteThinLTOBitcodePass(OS));
1449 return Ret.release();
1453 LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
1457 extern "C" const void*
1458 LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
1459 return Buffer->data.data();
1463 LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
1464 return Buffer->data.length();
1467 // This is what we used to parse upstream bitcode for actual ThinLTO
1468 // processing. We'll call this once per module optimized through ThinLTO, and
1469 // it'll be called concurrently on many threads.
1470 extern "C" LLVMModuleRef
1471 LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
1474 const char *identifier) {
1475 StringRef Data(data, len);
1476 MemoryBufferRef Buffer(Data, identifier);
1477 unwrap(Context)->enableDebugTypeODRUniquing();
1478 Expected<std::unique_ptr<Module>> SrcOrError =
1479 parseBitcodeFile(Buffer, *unwrap(Context));
1481 LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
1484 return wrap(std::move(*SrcOrError).release());
// Reports, through the `A`/`B` out-pointers, the `DICompileUnit`s present in
// this module: the loop below walks `debug_compile_units()` and appears to
// fill `A` first and then `B` (via the `Cur`/`Next` cursors). See the comment
// in `back/lto.rs` for why this exists.
LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
                                DICompileUnit **B) {
  Module *M = unwrap(Mod);
  DICompileUnit **Cur = A;
  DICompileUnit **Next = B;
  for (DICompileUnit *CU : M->debug_compile_units()) {
// Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
// the comment in `back/lto.rs` for why this exists.
LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
  Module *M = unwrap(Mod);

  // If the original source module didn't have a `DICompileUnit` then try to
  // merge all the existing compile units. If there aren't actually any though
  // then there's not much for us to do so return.
  if (Unit == nullptr) {
    for (DICompileUnit *CU : M->debug_compile_units()) {
    if (Unit == nullptr)

  // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
  // process it recursively. Note that we specifically iterate over instructions
  // to ensure we feed everything into it.
  DebugInfoFinder Finder;
  Finder.processModule(*M);
  for (Function &F : M->functions()) {
    for (auto &FI : F) {
      for (Instruction &BI : FI) {
        // Hand-feed instruction-attached locations and the debug intrinsics
        // (`llvm.dbg.value` / `llvm.dbg.declare`) into the finder as well.
        if (auto Loc = BI.getDebugLoc())
          Finder.processLocation(*M, Loc);
        if (auto DVI = dyn_cast<DbgValueInst>(&BI))
          Finder.processValue(*M, DVI);
        if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
          Finder.processDeclare(*M, DDI);

  // After we've found all our debuginfo, rewrite all subprograms to point to
  // the same `DICompileUnit`.
  for (auto &F : Finder.subprograms()) {
    F->replaceUnit(Unit);

  // Erase any other references to other `DICompileUnit` instances, the verifier
  // will later ensure that we don't actually have any other stale references to
  // worry about.
  auto *MD = M->getNamedMetadata("llvm.dbg.cu");
  MD->clearOperands();
  MD->addOperand(Unit);