8 #include "llvm/Analysis/TargetLibraryInfo.h"
9 #include "llvm/Analysis/TargetTransformInfo.h"
10 #include "llvm/CodeGen/TargetSubtargetInfo.h"
11 #include "llvm/InitializePasses.h"
12 #include "llvm/IR/AutoUpgrade.h"
13 #include "llvm/IR/AssemblyAnnotationWriter.h"
14 #include "llvm/IR/IntrinsicInst.h"
15 #include "llvm/IR/Verifier.h"
16 #include "llvm/Passes/PassBuilder.h"
17 #if LLVM_VERSION_GE(9, 0)
18 #include "llvm/Passes/StandardInstrumentations.h"
20 #include "llvm/Support/CBindingWrapping.h"
21 #include "llvm/Support/FileSystem.h"
22 #include "llvm/Support/Host.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
25 #include "llvm/Transforms/IPO/AlwaysInliner.h"
26 #include "llvm/Transforms/IPO/FunctionImport.h"
27 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
28 #include "llvm/LTO/LTO.h"
29 #include "llvm-c/Transforms/PassManagerBuilder.h"
31 #include "llvm/Transforms/Instrumentation.h"
32 #if LLVM_VERSION_GE(9, 0)
33 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
34 #include "llvm/Support/TimeProfiler.h"
36 #if LLVM_VERSION_GE(8, 0)
37 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
38 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
40 #if LLVM_VERSION_GE(9, 0)
41 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
43 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
47 typedef struct LLVMOpaquePass *LLVMPassRef;
48 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
50 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
51 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
52 DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBuilder,
53 LLVMPassManagerBuilderRef)
// Register every legacy-pass-manager pass category with LLVM's global
// PassRegistry so passes can later be located by name
// (see LLVMRustFindAndCreatePass below).
55 extern "C" void LLVMInitializePasses() {
56 PassRegistry &Registry = *PassRegistry::getPassRegistry();
57 initializeCore(Registry);
58 initializeCodeGen(Registry);
59 initializeScalarOpts(Registry);
60 initializeVectorization(Registry);
61 initializeIPO(Registry);
62 initializeAnalysis(Registry);
63 initializeTransformUtils(Registry);
64 initializeInstCombine(Registry);
65 initializeInstrumentation(Registry);
66 initializeTarget(Registry);
// Start LLVM's -ftime-trace style profiler for the current thread.
// No-op when compiled against LLVM versions older than 9, which lack
// the TimeProfiler support header included above.
69 extern "C" void LLVMTimeTraceProfilerInitialize() {
70 #if LLVM_VERSION_GE(9, 0)
71 timeTraceProfilerInitialize();
// Flush the collected time-trace events as JSON to `FileName` (truncating
// any existing file) and tear the profiler down. Only meaningful after
// LLVMTimeTraceProfilerInitialize; no-op on LLVM < 9.
75 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
76 #if LLVM_VERSION_GE(9, 0)
77 StringRef FN(FileName);
79 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
81 timeTraceProfilerWrite(OS);
82 timeTraceProfilerCleanup();
// Mirror of the Rust-side pass-kind enum; must stay in sync with the
// corresponding enum in rustc's LLVM bindings.
86 enum class LLVMRustPassKind {
// Map LLVM's PassKind onto the Rust-visible LLVMRustPassKind, collapsing
// anything that is neither a function nor a module pass into `Other`.
92 static LLVMRustPassKind toRust(PassKind Kind) {
95 return LLVMRustPassKind::Function;
97 return LLVMRustPassKind::Module;
99 return LLVMRustPassKind::Other;
// Look up a legacy pass by its registered name (e.g. an `-mllvm` style
// pass argument) and instantiate it. Relies on LLVMInitializePasses
// having populated the registry first.
103 extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
104 StringRef SR(PassName);
105 PassRegistry *PR = PassRegistry::getPassRegistry();
107 const PassInfo *PI = PR->getPassInfo(SR);
// Ownership of the created pass transfers to the caller (typically it is
// handed straight to a PassManager via LLVMRustAddPass).
109 return wrap(PI->createPass());
// Build the function-level ASan legacy pass. Userspace-only
// (CompileKernel=false); `Recover` selects -fsanitize-recover semantics.
// UseAfterScope is always enabled because Rust does not expose it.
114 extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
115 const bool CompileKernel = false;
116 const bool UseAfterScope = true;
118 return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
// Build the module-level ASan legacy pass (global-variable instrumentation).
// The factory function was renamed in LLVM 9, hence the version switch.
121 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
122 const bool CompileKernel = false;
124 #if LLVM_VERSION_GE(9, 0)
125 return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
127 return wrap(createAddressSanitizerModulePass(CompileKernel, Recover));
// Build the MSan legacy pass. The constructor signature changed twice
// across LLVM releases: LLVM 9+ takes a MemorySanitizerOptions struct,
// LLVM 8 takes the raw arguments, and older versions use a different
// factory name entirely.
131 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
132 #if LLVM_VERSION_GE(9, 0)
133 const bool CompileKernel = false;
135 return wrap(createMemorySanitizerLegacyPassPass(
136 MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
137 #elif LLVM_VERSION_GE(8, 0)
138 return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover));
140 return wrap(createMemorySanitizerPass(TrackOrigins, Recover));
// Build the TSan legacy pass; the factory was renamed with the "Legacy"
// suffix in LLVM 8.
144 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
145 #if LLVM_VERSION_GE(8, 0)
146 return wrap(createThreadSanitizerLegacyPassPass());
148 return wrap(createThreadSanitizerPass());
// Report whether a pass is a function pass, module pass, or other, so the
// Rust side can add it to the appropriate pass manager.
152 extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
154 Pass *Pass = unwrap(RustPass);
155 return toRust(Pass->getPassKind());
// Hand a pass (created e.g. by LLVMRustFindAndCreatePass) to a legacy pass
// manager; the manager takes ownership of the pass.
158 extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
160 Pass *Pass = unwrap(RustPass);
161 PassManagerBase *PMB = unwrap(PMR);
// Thin wrapper exposing PassManagerBuilder::populateThinLTOPassManager,
// which the stock LLVM C API does not surface.
166 void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
167 LLVMPassManagerBuilderRef PMBR,
168 LLVMPassManagerRef PMR
170 unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
// Schedule a caller-supplied list of passes to run at the end of the
// legacy optimization pipeline. The lambda captures the raw pointer array
// by value; the caller must keep `Passes` alive until the builder runs.
174 void LLVMRustAddLastExtensionPasses(
175 LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
176 auto AddExtensionPasses = [Passes, NumPasses](
177 const PassManagerBuilder &Builder, PassManagerBase &PM) {
178 for (size_t I = 0; I < NumPasses; I++) {
179 PM.add(unwrap(Passes[I]));
182 // Add the passes to both of the pre-finalization extension points,
183 // so they are run for optimized and non-optimized builds.
184 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
186 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
// Per-architecture helper macros: each SUBTARGET_<ARCH> expands to
// SUBTARGET(<Arch>) only when the matching LLVM target component was
// compiled in, and to nothing otherwise. GEN_SUBTARGETS then stamps out
// extern declarations of each target's feature/CPU tables via SUBTARGET(x).
190 #ifdef LLVM_COMPONENT_X86
191 #define SUBTARGET_X86 SUBTARGET(X86)
193 #define SUBTARGET_X86
196 #ifdef LLVM_COMPONENT_ARM
197 #define SUBTARGET_ARM SUBTARGET(ARM)
199 #define SUBTARGET_ARM
202 #ifdef LLVM_COMPONENT_AARCH64
203 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
205 #define SUBTARGET_AARCH64
208 #ifdef LLVM_COMPONENT_MIPS
209 #define SUBTARGET_MIPS SUBTARGET(Mips)
211 #define SUBTARGET_MIPS
214 #ifdef LLVM_COMPONENT_POWERPC
215 #define SUBTARGET_PPC SUBTARGET(PPC)
217 #define SUBTARGET_PPC
220 #ifdef LLVM_COMPONENT_SYSTEMZ
221 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
223 #define SUBTARGET_SYSTEMZ
226 #ifdef LLVM_COMPONENT_MSP430
227 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
229 #define SUBTARGET_MSP430
232 #ifdef LLVM_COMPONENT_RISCV
233 #define SUBTARGET_RISCV SUBTARGET(RISCV)
235 #define SUBTARGET_RISCV
238 #ifdef LLVM_COMPONENT_SPARC
239 #define SUBTARGET_SPARC SUBTARGET(Sparc)
241 #define SUBTARGET_SPARC
244 #ifdef LLVM_COMPONENT_HEXAGON
245 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
247 #define SUBTARGET_HEXAGON
250 #define GEN_SUBTARGETS \
262 #define SUBTARGET(x) \
264 extern const SubtargetFeatureKV x##FeatureKV[]; \
265 extern const SubtargetFeatureKV x##SubTypeKV[]; \
// Query whether the target machine's subtarget enables a named feature,
// using MCSubtargetInfo's "+feature" string syntax.
271 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
272 const char *Feature) {
273 TargetMachine *Target = unwrap(TM);
274 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
275 return MCInfo->checkFeatures(std::string("+") + Feature);
// Rust-side code-model enum; must stay in sync with rustc's bindings.
278 enum class LLVMRustCodeModel {
// Translate the Rust code model into LLVM's CodeModel::Model. Aborts on
// values with no LLVM equivalent (e.g. an out-of-range discriminant).
287 static CodeModel::Model fromRust(LLVMRustCodeModel Model) {
289 case LLVMRustCodeModel::Small:
290 return CodeModel::Small;
291 case LLVMRustCodeModel::Kernel:
292 return CodeModel::Kernel;
293 case LLVMRustCodeModel::Medium:
294 return CodeModel::Medium;
295 case LLVMRustCodeModel::Large:
296 return CodeModel::Large;
298 report_fatal_error("Bad CodeModel.");
// Rust-side codegen optimization level; mirrors CodeGenOpt::Level.
302 enum class LLVMRustCodeGenOptLevel {
// Translate into LLVM's CodeGenOpt::Level; aborts on unknown values.
310 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
312 case LLVMRustCodeGenOptLevel::None:
313 return CodeGenOpt::None;
314 case LLVMRustCodeGenOptLevel::Less:
315 return CodeGenOpt::Less;
316 case LLVMRustCodeGenOptLevel::Default:
317 return CodeGenOpt::Default;
318 case LLVMRustCodeGenOptLevel::Aggressive:
319 return CodeGenOpt::Aggressive;
321 report_fatal_error("Bad CodeGenOptLevel.");
// Rust-side new-pass-manager optimization level (O0..O3, Os, Oz).
325 enum class LLVMRustPassBuilderOptLevel {
// Translate into PassBuilder::OptimizationLevel for the new pass manager;
// aborts on unknown values.
334 static PassBuilder::OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
336 case LLVMRustPassBuilderOptLevel::O0:
337 return PassBuilder::O0;
338 case LLVMRustPassBuilderOptLevel::O1:
339 return PassBuilder::O1;
340 case LLVMRustPassBuilderOptLevel::O2:
341 return PassBuilder::O2;
342 case LLVMRustPassBuilderOptLevel::O3:
343 return PassBuilder::O3;
344 case LLVMRustPassBuilderOptLevel::Os:
345 return PassBuilder::Os;
346 case LLVMRustPassBuilderOptLevel::Oz:
347 return PassBuilder::Oz;
349 report_fatal_error("Bad PassBuilderOptLevel.");
// Rust-side relocation model; mirrors Reloc::Model plus a Default variant.
353 enum class LLVMRustRelocMode {
// Translate into an Optional<Reloc::Model>: `Default` maps to None so the
// target can pick its own default; everything else maps 1:1. Aborts on
// unknown values.
363 static Optional<Reloc::Model> fromRust(LLVMRustRelocMode RustReloc) {
365 case LLVMRustRelocMode::Default:
367 case LLVMRustRelocMode::Static:
368 return Reloc::Static;
369 case LLVMRustRelocMode::PIC:
371 case LLVMRustRelocMode::DynamicNoPic:
372 return Reloc::DynamicNoPIC;
373 case LLVMRustRelocMode::ROPI:
375 case LLVMRustRelocMode::RWPI:
377 case LLVMRustRelocMode::ROPIRWPI:
378 return Reloc::ROPI_RWPI;
380 report_fatal_error("Bad RelocModel.");
384 /// getLongestEntryLength - Return the length of the longest entry in the table.
385 template<typename KV>
386 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
// Entries are keyed by NUL-terminated C strings (SubtargetFeatureKV::Key),
// so strlen is the right measure for column alignment in the printers below.
388 for (auto &I : Table)
389 MaxLen = std::max(MaxLen, std::strlen(I.Key));
// Print the CPU table for `TM`'s target to stdout (backs
// `rustc --print target-cpus`). When the target arch matches the host
// arch, an extra "native" entry is shown with the detected host CPU.
393 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
394 const TargetMachine *Target = unwrap(TM);
395 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
396 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
397 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
398 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
399 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
401 printf("Available CPUs for this target:\n");
402 if (HostArch == TargetArch) {
403 const StringRef HostCPU = sys::getHostCPUName();
// %.*s with an explicit length because StringRef is not NUL-terminated.
404 printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
405 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
407 for (auto &CPU : CPUTable)
408 printf(" %-*s\n", MaxCPULen, CPU.Key);
// Print the feature table for `TM`'s target to stdout (backs
// `rustc --print target-features`), padded to the longest feature name.
412 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef TM) {
413 const TargetMachine *Target = unwrap(TM);
414 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
415 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
416 unsigned MaxFeatLen = getLongestEntryLength(FeatTable);
418 printf("Available features for this target:\n");
419 for (auto &Feature : FeatTable)
420 printf(" %-*s - %s.\n", MaxFeatLen, Feature.Key, Feature.Desc);
// NOTE(review): "-C -target-cpu" in the example below looks like it should
// read "-C target-cpu" — confirm before changing user-visible output.
423 printf("Use +feature to enable a feature, or -feature to disable it.\n"
424 "For example, rustc -C -target-cpu=mycpu -C "
425 "target-feature=+feature1,-feature2\n\n");
// Fallback stubs for LLVM versions whose MCSubtargetInfo does not expose
// the CPU/feature tables: print a "not supported" notice instead.
430 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
431 printf("Target CPU help is not supported by this LLVM version.\n\n");
434 extern "C" void LLVMRustPrintTargetFeatures(LLVMTargetMachineRef) {
435 printf("Target features help is not supported by this LLVM version.\n\n");
// Return the detected host CPU name for `-C target-cpu=native`. The
// length is reported through `len` because the returned data is a
// StringRef's buffer, which is not guaranteed to be NUL-terminated.
439 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
440 StringRef Name = sys::getHostCPUName();
// Construct a TargetMachine from rustc's codegen options: triple, CPU,
// feature string, ABI, code model, relocation model, opt level and a set
// of boolean knobs. Returns null (after recording the error via
// LLVMRustSetLastError) if the triple does not name a registered target.
445 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
446 const char *TripleStr, const char *CPU, const char *Feature,
447 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocMode RustReloc,
448 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
449 bool PositionIndependentExecutable, bool FunctionSections,
451 bool TrapUnreachable,
454 bool EmitStackSizeSection,
455 bool RelaxELFRelocations) {
457 auto OptLevel = fromRust(RustOptLevel);
458 auto RM = fromRust(RustReloc);
461 Triple Trip(Triple::normalize(TripleStr));
462 const llvm::Target *TheTarget =
463 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
464 if (TheTarget == nullptr) {
465 LLVMRustSetLastError(Error.c_str());
469 TargetOptions Options;
// Float ABI: hardware default unless soft-float was requested.
471 Options.FloatABIType = FloatABI::Default;
473 Options.FloatABIType = FloatABI::Soft;
475 Options.DataSections = DataSections;
476 Options.FunctionSections = FunctionSections;
// AsmComments controls both verbose asm output and comment preservation.
477 Options.MCOptions.AsmVerbose = AsmComments;
478 Options.MCOptions.PreserveAsmComments = AsmComments;
479 Options.MCOptions.ABIName = ABIStr;
480 Options.RelaxELFRelocations = RelaxELFRelocations;
482 if (TrapUnreachable) {
483 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
484 // This limits the extent of possible undefined behavior in some cases, as
485 // it prevents control flow from "falling through" into whatever code
486 // happens to be laid out next in memory.
487 Options.TrapUnreachable = true;
491 Options.ThreadModel = ThreadModel::Single;
494 Options.EmitStackSizeSection = EmitStackSizeSection;
// Code model: `None` means "let the target choose" (empty Optional).
496 Optional<CodeModel::Model> CM;
497 if (RustCM != LLVMRustCodeModel::None)
498 CM = fromRust(RustCM);
499 TargetMachine *TM = TheTarget->createTargetMachine(
500 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
// Destroy a TargetMachine created by LLVMRustCreateTargetMachine.
504 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
// Apply rustc's optimization settings to a legacy PassManagerBuilder:
// opt level, vectorization/merge-functions toggles, ThinLTO pre-link mode,
// and (mutually exclusive) PGO instrumentation-generation or -use paths.
508 extern "C" void LLVMRustConfigurePassManagerBuilder(
509 LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
510 bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
511 const char* PGOGenPath, const char* PGOUsePath) {
512 unwrap(PMBR)->MergeFunctions = MergeFunctions;
513 unwrap(PMBR)->SLPVectorize = SLPVectorize;
514 unwrap(PMBR)->OptLevel = fromRust(OptLevel);
515 unwrap(PMBR)->LoopVectorize = LoopVectorize;
516 unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
// PGO: generation path enables IR instrumentation; use path points at an
// existing profile for profile-guided optimization.
520 unwrap(PMBR)->EnablePGOInstrGen = true;
521 unwrap(PMBR)->PGOInstrGen = PGOGenPath;
525 unwrap(PMBR)->PGOInstrUse = PGOUsePath;
529 // Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
530 // field of a PassManagerBuilder, we expose our own method of doing so.
// The TargetLibraryInfoImpl is heap-allocated and handed to the builder;
// the builder owns it from here on. `DisableSimplifyLibCalls` turns off
// all libcall recognition (rustc's -C no-builtins style behavior).
531 extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
533 bool DisableSimplifyLibCalls) {
534 Triple TargetTriple(unwrap(M)->getTargetTriple());
535 TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
536 if (DisableSimplifyLibCalls)
537 TLI->disableAllFunctions();
538 unwrap(PMBR)->LibraryInfo = TLI;
541 // Unfortunately, the LLVM C API doesn't provide a way to create the
542 // TargetLibraryInfo pass, so we use this method to do so.
// Unlike LLVMRustAddBuilderLibraryInfo, the impl here is stack-local:
// TargetLibraryInfoWrapperPass copies what it needs from TLII.
543 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
544 bool DisableSimplifyLibCalls) {
545 Triple TargetTriple(unwrap(M)->getTargetTriple());
546 TargetLibraryInfoImpl TLII(TargetTriple);
547 if (DisableSimplifyLibCalls)
548 TLII.disableAllFunctions();
549 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
552 // Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
553 // all the functions in a module, so we do that manually here. You'll find
554 // similar code in clang's BackendUtil.cpp file.
// Runs a legacy FunctionPassManager over every function definition in the
// module, after first auto-upgrading calls to renamed/removed intrinsics.
555 extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
557 llvm::legacy::FunctionPassManager *P =
558 unwrap<llvm::legacy::FunctionPassManager>(PMR);
559 P->doInitialization();
561 // Upgrade all calls to old intrinsics first.
562 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
563 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
// Only definitions are run through the pass manager; declarations have no
// body to optimize.
565 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
567 if (!I->isDeclaration())
// Forward rustc's `-C llvm-args` to LLVM's global cl::opt machinery.
// Guarded by a function-local flag because LLVM only allows parsing the
// command line once per process.
573 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
574 // Initializing the command-line options more than once is not allowed. So,
575 // check if they've already been initialized. (This could happen if we're
576 // being called from rustpkg, for example). If the arguments change, then
577 // that's just kinda unfortunate.
578 static bool Initialized = false;
582 cl::ParseCommandLineOptions(Argc, Argv);
// Rust-side output file type (assembly vs. object code).
585 enum class LLVMRustFileType {
// LLVM 10 moved CodeGenFileType out of TargetMachine into a top-level
// enum, so two converter overloads are provided behind a version switch.
591 #if LLVM_VERSION_GE(10, 0)
592 static CodeGenFileType fromRust(LLVMRustFileType Type) {
594 case LLVMRustFileType::AssemblyFile:
595 return CGFT_AssemblyFile;
596 case LLVMRustFileType::ObjectFile:
597 return CGFT_ObjectFile;
599 report_fatal_error("Bad FileType.");
603 static TargetMachine::CodeGenFileType fromRust(LLVMRustFileType Type) {
605 case LLVMRustFileType::AssemblyFile:
606 return TargetMachine::CGFT_AssemblyFile;
607 case LLVMRustFileType::ObjectFile:
608 return TargetMachine::CGFT_ObjectFile;
610 report_fatal_error("Bad FileType.");
// Run the codegen pass pipeline over module `M` and write assembly or an
// object file to `Path`. On failure to open the output file, records the
// OS error via LLVMRustSetLastError and returns Failure.
615 extern "C" LLVMRustResult
616 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
617 LLVMModuleRef M, const char *Path,
618 LLVMRustFileType RustFileType) {
619 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
620 auto FileType = fromRust(RustFileType);
622 std::string ErrorInfo;
624 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
626 ErrorInfo = EC.message();
627 if (ErrorInfo != "") {
628 LLVMRustSetLastError(ErrorInfo.c_str());
629 return LLVMRustResult::Failure;
// buffer_ostream defers writes until destruction, which addPassesToEmitFile
// requires for streamers that seek.
632 buffer_ostream BOS(OS);
633 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
636 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
637 // stream (OS), so the only real safe place to delete this is here? Don't we
638 // wish this was written in Rust?
639 LLVMDisposePassManager(PMR);
640 return LLVMRustResult::Success;
// Which phase of (Thin/Fat) LTO the optimization run belongs to; selects
// the PassBuilder pipeline in LLVMRustOptimizeWithNewPassManager.
643 enum class LLVMRustOptStage {
// Sanitizer configuration passed from rustc; field layout must match the
// #[repr(C)] struct on the Rust side.
651 struct LLVMRustSanitizerOptions {
654 bool SanitizeAddress;
655 bool SanitizeRecover;
656 int SanitizeMemoryTrackOrigins;
// Optimize `ModuleRef` with LLVM's new pass manager. Builds a PassBuilder
// from the target machine, tuning options and (optional) PGO settings,
// registers all analyses, collects sanitizer/verifier callbacks, selects
// the pipeline matching `OptStage`, and runs it. Requires LLVM >= 9; on
// older versions this aborts via report_fatal_error.
660 LLVMRustOptimizeWithNewPassManager(
661 LLVMModuleRef ModuleRef,
662 LLVMTargetMachineRef TMRef,
663 LLVMRustPassBuilderOptLevel OptLevelRust,
664 LLVMRustOptStage OptStage,
665 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
666 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
667 bool DisableSimplifyLibCalls,
668 LLVMRustSanitizerOptions *SanitizerOptions,
669 const char *PGOGenPath, const char *PGOUsePath) {
670 #if LLVM_VERSION_GE(9, 0)
671 Module *TheModule = unwrap(ModuleRef);
672 TargetMachine *TM = unwrap(TMRef);
673 PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
675 // FIXME: MergeFunctions is not supported by NewPM yet.
676 (void) MergeFunctions;
678 PipelineTuningOptions PTO;
679 PTO.LoopUnrolling = UnrollLoops;
680 PTO.LoopInterleaving = UnrollLoops;
681 PTO.LoopVectorization = LoopVectorize;
682 PTO.SLPVectorization = SLPVectorize;
684 PassInstrumentationCallbacks PIC;
685 StandardInstrumentations SI;
686 SI.registerCallbacks(PIC);
// PGO is either instrumentation generation (PGOGenPath) or profile use
// (PGOUsePath); the paths are mutually exclusive by construction in rustc.
688 Optional<PGOOptions> PGOOpt;
691 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr);
692 } else if (PGOUsePath) {
694 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse);
697 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
699 // FIXME: We may want to expose this as an option.
700 bool DebugPassManager = false;
701 LoopAnalysisManager LAM(DebugPassManager);
702 FunctionAnalysisManager FAM(DebugPassManager);
703 CGSCCAnalysisManager CGAM(DebugPassManager);
704 ModuleAnalysisManager MAM(DebugPassManager);
706 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
// TargetLibraryInfo must outlive the pipeline run; it is owned here by a
// unique_ptr captured by reference in the registration lambda.
708 Triple TargetTriple(TheModule->getTargetTriple());
709 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
710 if (DisableSimplifyLibCalls)
711 TLII->disableAllFunctions();
712 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
714 PB.registerModuleAnalyses(MAM);
715 PB.registerCGSCCAnalyses(CGAM);
716 PB.registerFunctionAnalyses(FAM);
717 PB.registerLoopAnalyses(LAM);
718 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
720 // We manually collect pipeline callbacks so we can apply them at O0, where the
721 // PassBuilder does not create a pipeline.
722 std::vector<std::function<void(ModulePassManager &)>> PipelineStartEPCallbacks;
723 std::vector<std::function<void(FunctionPassManager &, PassBuilder::OptimizationLevel)>>
724 OptimizerLastEPCallbacks;
727 PipelineStartEPCallbacks.push_back([VerifyIR](ModulePassManager &MPM) {
728 MPM.addPass(VerifierPass());
// Sanitizer passes: MSan/TSan gained module-level NewPM passes in LLVM 10;
// before that they are registered as function passes at optimizer-last.
732 if (SanitizerOptions) {
733 if (SanitizerOptions->SanitizeMemory) {
734 MemorySanitizerOptions Options(
735 SanitizerOptions->SanitizeMemoryTrackOrigins,
736 SanitizerOptions->SanitizeRecover,
737 /*CompileKernel=*/false);
738 #if LLVM_VERSION_GE(10, 0)
739 PipelineStartEPCallbacks.push_back([Options](ModulePassManager &MPM) {
740 MPM.addPass(MemorySanitizerPass(Options));
743 OptimizerLastEPCallbacks.push_back(
744 [Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
745 FPM.addPass(MemorySanitizerPass(Options));
750 if (SanitizerOptions->SanitizeThread) {
751 #if LLVM_VERSION_GE(10, 0)
752 PipelineStartEPCallbacks.push_back([](ModulePassManager &MPM) {
753 MPM.addPass(ThreadSanitizerPass());
756 OptimizerLastEPCallbacks.push_back(
757 [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
758 FPM.addPass(ThreadSanitizerPass());
763 if (SanitizerOptions->SanitizeAddress) {
764 // FIXME: Rust does not expose the UseAfterScope option.
765 PipelineStartEPCallbacks.push_back([&](ModulePassManager &MPM) {
766 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
768 OptimizerLastEPCallbacks.push_back(
769 [SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
770 FPM.addPass(AddressSanitizerPass(
771 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover));
774 PipelineStartEPCallbacks.push_back(
775 [SanitizerOptions](ModulePassManager &MPM) {
776 MPM.addPass(ModuleAddressSanitizerPass(
777 /*CompileKernel=*/false, SanitizerOptions->SanitizeRecover));
// Pipeline construction: at O0 the callbacks are applied by hand (plus
// always-inline and, on LLVM 10+, O0 PGO passes); at higher levels the
// callbacks are registered with the PassBuilder and a default pipeline
// matching the LTO stage is built.
783 ModulePassManager MPM(DebugPassManager);
784 if (!NoPrepopulatePasses) {
785 if (OptLevel == PassBuilder::O0) {
786 for (const auto &C : PipelineStartEPCallbacks)
789 if (!OptimizerLastEPCallbacks.empty()) {
790 FunctionPassManager FPM(DebugPassManager);
791 for (const auto &C : OptimizerLastEPCallbacks)
793 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
796 MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
798 #if LLVM_VERSION_GE(10, 0)
800 PB.addPGOInstrPassesForO0(
801 MPM, DebugPassManager, PGOOpt->Action == PGOOptions::IRInstr,
802 /*IsCS=*/false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
806 for (const auto &C : PipelineStartEPCallbacks)
807 PB.registerPipelineStartEPCallback(C);
808 for (const auto &C : OptimizerLastEPCallbacks)
809 PB.registerOptimizerLastEPCallback(C);
812 case LLVMRustOptStage::PreLinkNoLTO:
813 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
815 case LLVMRustOptStage::PreLinkThinLTO:
816 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
818 case LLVMRustOptStage::PreLinkFatLTO:
819 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel, DebugPassManager);
821 case LLVMRustOptStage::ThinLTO:
822 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
823 // It only seems to be needed for C++ specific optimizations.
824 MPM = PB.buildThinLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
826 case LLVMRustOptStage::FatLTO:
827 MPM = PB.buildLTODefaultPipeline(OptLevel, DebugPassManager, nullptr);
// ThinLTO buffers require canonical aliases and named anonymous globals.
833 if (UseThinLTOBuffers) {
834 MPM.addPass(CanonicalizeAliasesPass());
835 MPM.addPass(NameAnonGlobalPass());
838 // Upgrade all calls to old intrinsics first.
839 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
840 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
842 MPM.run(*TheModule, MAM);
844 // The new pass manager has been available for a long time,
845 // but we don't bother supporting it on old LLVM versions.
846 report_fatal_error("New pass manager only supported since LLVM 9");
850 // Callback to demangle function name
// Parameters, in order: mangled name pointer, mangled name length,
852 // * name to be demangled
// output buffer pointer, and:
855 // * output buffer len
856 // Returns len of demangled string, or 0 if demangle failed.
857 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
// AssemblyAnnotationWriter that prints demangled Rust symbol names as
// `; name` comments above function definitions and call/invoke sites when
// emitting textual LLVM IR (used by LLVMRustPrintModule below).
862 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
// Scratch buffer reused across CallDemangle invocations to avoid
// reallocating for every symbol.
864 std::vector<char> Buf;
867 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
869 // Return empty string if demangle failed
870 // or if name does not need to be demangled
871 StringRef CallDemangle(StringRef name) {
876 if (Buf.size() < name.size() * 2) {
877 // Demangled name usually shorter than mangled,
878 // but allocate twice as much memory just in case
879 Buf.resize(name.size() * 2);
// R is the demangled length reported by the callback (0 = failure).
882 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
888 auto Demangled = StringRef(Buf.data(), R);
889 if (Demangled == name) {
890 // Do not print anything if demangled name is equal to mangled.
// Emit `; <demangled>` above each function whose name demangles.
897 void emitFunctionAnnot(const Function *F,
898 formatted_raw_ostream &OS) override {
899 StringRef Demangled = CallDemangle(F->getName());
900 if (Demangled.empty()) {
904 OS << "; " << Demangled << "\n";
// Annotate call/invoke instructions with the demangled callee name.
907 void emitInstructionAnnot(const Instruction *I,
908 formatted_raw_ostream &OS) override {
911 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
913 Value = CI->getCalledValue();
914 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
916 Value = II->getCalledValue();
918 // Could demangle more operations, e. g.
919 // `store %place, @function`.
// Indirect callees (no name) cannot be demangled; skip them.
923 if (!Value->hasName()) {
927 StringRef Demangled = CallDemangle(Value->getName());
928 if (Demangled.empty()) {
932 OS << "; " << Name << " " << Demangled << "\n";
// Write module `M` as textual LLVM IR to `Path`, annotated with demangled
// Rust names via RustAssemblyAnnotationWriter. Records the OS error and
// returns Failure if the file cannot be opened.
938 extern "C" LLVMRustResult
939 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
940 std::string ErrorInfo;
942 raw_fd_ostream OS(Path, EC, sys::fs::F_None);
944 ErrorInfo = EC.message();
945 if (ErrorInfo != "") {
946 LLVMRustSetLastError(ErrorInfo.c_str());
947 return LLVMRustResult::Failure;
950 RustAssemblyAnnotationWriter AAW(Demangle);
951 formatted_raw_ostream FOS(OS);
952 unwrap(M)->print(FOS, &AAW);
954 return LLVMRustResult::Success;
// Print every registered legacy pass as "argument - name" to stdout
// (backs `rustc -C passes=list`). Initializes the registry first so the
// enumeration is complete.
957 extern "C" void LLVMRustPrintPasses() {
958 LLVMInitializePasses();
959 struct MyListener : PassRegistrationListener {
960 void passEnumerate(const PassInfo *Info) {
961 StringRef PassArg = Info->getPassArgument();
962 StringRef PassName = Info->getPassName();
963 if (!PassArg.empty()) {
964 // These unsigned->signed casts could theoretically overflow, but
965 // realistically never will (and even if, the result is implementation
966 // defined rather than plain UB).
967 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
968 (int)PassName.size(), PassName.data());
973 PassRegistry *PR = PassRegistry::getPassRegistry();
974 PR->enumerateWith(&Listener);
// Install the always-inline inliner (only `#[inline(always)]`-style
// functions are inlined) as the builder's inliner.
977 extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
979 unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
// Internalize every global except those whose name appears in `Symbols`
// (length `Len`), by running the internalize pass with a preservation
// predicate. Used to restrict a module's exported symbol set.
982 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
984 llvm::legacy::PassManager passes;
// Linear scan is fine here: the preserved-symbol list is small relative
// to a full symbol table.
986 auto PreserveFunctions = [=](const GlobalValue &GV) {
987 for (size_t I = 0; I < Len; I++) {
988 if (GV.getName() == Symbols[I]) {
995 passes.add(llvm::createInternalizePass(PreserveFunctions));
997 passes.run(*unwrap(M));
// Mark every function in the module nounwind, and additionally mark every
// invoke instruction inside function bodies as non-throwing. Used when
// rustc compiles with unwinding disabled (e.g. -C panic=abort).
1000 extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
1001 for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
1003 GV->setDoesNotThrow();
1004 Function *F = dyn_cast<Function>(GV);
1008 for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
1009 for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
1010 if (isa<InvokeInst>(I)) {
1011 InvokeInst *CI = cast<InvokeInst>(I);
1012 CI->setDoesNotThrow();
// Copy the target machine's data layout string onto the module so IR-level
// passes agree with codegen about type sizes and alignment.
1020 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1021 LLVMTargetMachineRef TMR) {
1022 TargetMachine *Target = unwrap(TMR);
1023 unwrap(Module)->setDataLayout(Target->createDataLayout());
// Record position-independent-code level (big PIC) as module metadata.
1026 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1027 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
// Record position-independent-executable level (large) as module metadata.
1030 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1031 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1034 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1035 // right now. This ThinLTO support is only enabled on "recent ish" versions of
1036 // LLVM, and otherwise it's just blanket rejected from other compilers.
1038 // Most of this implementation is straight copied from LLVM. At the time of
1039 // this writing it wasn't *quite* suitable to reuse more code from upstream
1040 // for our purposes, but we should strive to upstream this support once it's
1041 // ready to go! I figure we may want a bit of testing locally first before
1042 // sending this upstream to LLVM. I hear though they're quite eager to receive
1043 // feedback like this!
1045 // If you're reading this code and wondering "what in the world" or you're
1046 // working "good lord my LLVM upgrade is *still* failing due to these bindings"
1047 // then fear not! (ok maybe fear a little). All code here is mostly based
1048 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1050 // You'll find that the general layout here roughly corresponds to the `run`
1051 // method in that file as well as `ProcessThinLTOModule`. Functions are
1052 // specifically commented below as well, but if you're updating this code
1053 // or otherwise trying to understand it, the LLVM source will be useful in
1054 // interpreting the mysteries within.
1056 // Otherwise I'll apologize in advance, it probably requires a relatively
1057 // significant investment on your part to "truly understand" what's going on
1058 // here. Not saying I do myself, but it took me awhile staring at LLVM's source
1059 // and various online resources about ThinLTO to make heads or tails of all
1062 // This is a shared data structure which *must* be threadsafe to share
1063 // read-only amongst threads. This also corresponds basically to the arguments
1064 // of the `ProcessThinLTOModule` function in the LLVM source.
1065 struct LLVMRustThinLTOData {
1066 // The combined index that is the global analysis over all modules we're
1067 // performing ThinLTO for. This is mostly managed by LLVM.
1068 ModuleSummaryIndex Index;
1070 // All modules we may look at, stored as in-memory serialized versions. This
1071 // is later used when inlining to ensure we can extract any module to inline
1073 StringMap<MemoryBufferRef> ModuleMap;
1075 // A set that we manage of everything we *don't* want internalized. Note that
1076 // this includes all transitive references right now as well, but it may not
1078 DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
1080 // Not 100% sure what these are, but they impact what's internalized and
1081 // what's inlined across modules, I believe.
1082 StringMap<FunctionImporter::ImportMapTy> ImportLists;
1083 StringMap<FunctionImporter::ExportSetTy> ExportLists;
1084 StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
// HaveGVs=false: the index is built from summaries only, without IR-level
// GlobalValue pointers attached.
1086 LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
1089 // Just an argument to the `LLVMRustCreateThinLTOData` function below.
// Describes one serialized module: its identifier plus a (data, len)
// pointer pair into the caller-owned bitcode buffer.
1090 struct LLVMRustThinLTOModule {
1091 const char *identifier;
1096 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
// Pick the summary the linker would choose for a symbol: prefer the first
// strong (non-available_externally, non-weak-for-linker) definition, then
// fall back to the first plain definition, else null.
1098 static const GlobalValueSummary *
1099 getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
1100 auto StrongDefForLinker = llvm::find_if(
1101 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1102 auto Linkage = Summary->linkage();
1103 return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
1104 !GlobalValue::isWeakForLinker(Linkage);
1106 if (StrongDefForLinker != GVSummaryList.end())
1107 return StrongDefForLinker->get();
1109 auto FirstDefForLinker = llvm::find_if(
1110 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1111 auto Linkage = Summary->linkage();
1112 return !GlobalValue::isAvailableExternallyLinkage(Linkage);
1114 if (FirstDefForLinker == GVSummaryList.end())
1116 return FirstDefForLinker->get();
1119 // The main entry point for creating the global ThinLTO analysis. The structure
1120 // here is basically the same as before threads are spawned in the `run`
1121 // function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
// On failure this reports through `LLVMRustSetLastError`; on success the
// caller owns the returned `LLVMRustThinLTOData` (freed via
// `LLVMRustFreeThinLTOData`).
1122 extern "C" LLVMRustThinLTOData*
1123 LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
1125 const char **preserved_symbols,
// `llvm::make_unique` was removed once LLVM switched to C++14's
// `std::make_unique` (LLVM 10).
1127 #if LLVM_VERSION_GE(10, 0)
1128 auto Ret = std::make_unique<LLVMRustThinLTOData>();
1130 auto Ret = llvm::make_unique<LLVMRustThinLTOData>();
1133 // Load each module's summary and merge it into one combined index
1134 for (int i = 0; i < num_modules; i++) {
1135 auto module = &modules[i];
1136 StringRef buffer(module->data, module->len);
1137 MemoryBufferRef mem_buffer(buffer, module->identifier);
// Keep a handle on the raw bitcode so the per-module import step can lazily
// load other modules by identifier.
1139 Ret->ModuleMap[module->identifier] = mem_buffer;
1141 if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
1142 LLVMRustSetLastError(toString(std::move(Err)).c_str());
1147 // Collect for each module the list of function it defines (GUID -> Summary)
1148 Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);
1150 // Convert the preserved symbols set from string to GUID, this is then needed
1151 // for internalization.
1152 for (int i = 0; i < num_symbols; i++) {
1153 auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
1154 Ret->GUIDPreservedSymbols.insert(GUID);
1157 // Collect the import/export lists for all modules from the call-graph in the
1160 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
// We have no prevailing-symbol information at this point, so answer
// `Unknown` for every GUID queried by dead-symbol analysis.
1161 auto deadIsPrevailing = [&](GlobalValue::GUID G) {
1162 return PrevailingType::Unknown;
1164 #if LLVM_VERSION_GE(8, 0)
1165 // We don't have a complete picture in our use of ThinLTO, just our immediate
1166 // crate, so we need `ImportEnabled = false` to limit internalization.
1167 // Otherwise, we sometimes lose `static` values -- see #60184.
1168 computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
1169 deadIsPrevailing, /* ImportEnabled = */ false);
1171 computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols, deadIsPrevailing);
// Fills `ImportLists`/`ExportLists` from the combined index's call graph.
1173 ComputeCrossModuleImport(
1175 Ret->ModuleToDefinedGVSummaries,
1180 // Resolve LinkOnce/Weak symbols, this has to be computed early because it
1181 // impacts the caching.
1183 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
1184 // being lifted from `lib/LTO/LTO.cpp` as well
1185 StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
1186 DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
// Only symbols with more than one summary need a prevailing copy chosen;
// singletons trivially prevail (see `isPrevailing` below).
1187 for (auto &I : Ret->Index) {
1188 if (I.second.SummaryList.size() > 1)
1189 PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
1191 auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
1192 const auto &Prevailing = PrevailingCopy.find(GUID);
1193 if (Prevailing == PrevailingCopy.end())
1195 return Prevailing->second == S;
// Record every linkage change so it can be replayed per-module later.
1197 auto recordNewLinkage = [&](StringRef ModuleIdentifier,
1198 GlobalValue::GUID GUID,
1199 GlobalValue::LinkageTypes NewLinkage) {
1200 ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
// LLVM 9 added the preserved-symbols parameter; LLVM 8 renamed the function
// from `thinLTOResolveWeakForLinkerInIndex`.
1202 #if LLVM_VERSION_GE(9, 0)
1203 thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
1204 Ret->GUIDPreservedSymbols);
1205 #elif LLVM_VERSION_GE(8, 0)
1206 thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage);
1208 thinLTOResolveWeakForLinkerInIndex(Ret->Index, isPrevailing, recordNewLinkage);
1211 // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
1212 // callback below. This callback below will dictate the linkage for all
1213 // summaries in the index, and we basically just only want to ensure that dead
1214 // symbols are internalized. Otherwise everything that's already external
1215 // linkage will stay as external, and internal will stay as internal.
1216 std::set<GlobalValue::GUID> ExportedGUIDs;
1217 for (auto &List : Ret->Index) {
1218 for (auto &GVS: List.second.SummaryList) {
// Local symbols are never exported; live non-local symbols keep their
// external linkage.
1219 if (GlobalValue::isLocalLinkage(GVS->linkage()))
1221 auto GUID = GVS->getOriginalName();
1222 if (GVS->flags().Live)
1223 ExportedGUIDs.insert(GUID);
// LLVM 10 changed the `isExported` callback to take a `ValueInfo` and added
// the `isPrevailing` argument to `thinLTOInternalizeAndPromoteInIndex`.
1226 #if LLVM_VERSION_GE(10, 0)
1227 auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
1228 const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
1229 return (ExportList != Ret->ExportLists.end() &&
1230 ExportList->second.count(VI)) ||
1231 ExportedGUIDs.count(VI.getGUID());
1233 thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
1235 auto isExported = [&](StringRef ModuleIdentifier, GlobalValue::GUID GUID) {
1236 const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
1237 return (ExportList != Ret->ExportLists.end() &&
1238 ExportList->second.count(GUID)) ||
1239 ExportedGUIDs.count(GUID);
1241 thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported);
// Transfer ownership to the C caller; paired with `LLVMRustFreeThinLTOData`.
1244 return Ret.release();
// Frees the analysis created by `LLVMRustCreateThinLTOData` (the `extern "C"`
// return-type line sits just above this chunk).
1248 LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
1252 // Below are the various passes that happen *per module* when doing ThinLTO.
1254 // In other words, these are the functions that are all run concurrently
1255 // with one another, one per module. The passes here correspond to the analysis
1256 // passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
1257 // `ProcessThinLTOModule` function. Here they're split up into separate steps
1258 // so rustc can save off the intermediate bytecode between each step.
// Per-module step: rename/promote module-local values so they can be
// referenced across modules, using the combined summary index. On failure the
// error is reported via `LLVMRustSetLastError`.
1261 LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1262 Module &Mod = *unwrap(M);
1263 if (renameModuleForThinLTO(Mod, Data->Index)) {
1264 LLVMRustSetLastError("renameModuleForThinLTO failed");
// Per-module step: apply the globally-computed prevailing-symbol resolution to
// this module's weak/linkonce definitions, using the summaries recorded for it
// in `ModuleToDefinedGVSummaries`.
1271 LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1272 Module &Mod = *unwrap(M);
1273 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
// LLVM 8 renamed `thinLTOResolveWeakForLinkerModule` to
// `thinLTOResolvePrevailingInModule`.
1274 #if LLVM_VERSION_GE(8, 0)
1275 thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
1277 thinLTOResolveWeakForLinkerModule(Mod, DefinedGlobals);
// Per-module step: internalize symbols in this module according to the
// linkages decided during `thinLTOInternalizeAndPromoteInIndex` above.
1283 LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1284 Module &Mod = *unwrap(M);
1285 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1286 thinLTOInternalizeModule(Mod, DefinedGlobals);
// Per-module step: import functions from other modules into this one, as
// dictated by the import list computed by `ComputeCrossModuleImport`. Errors
// are reported via `LLVMRustSetLastError`.
1291 LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1292 Module &Mod = *unwrap(M);
1294 const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
// Lazily loads a source module (by identifier) from the raw bitcode buffers
// captured in `ModuleMap` when the importer needs to pull a function from it.
1295 auto Loader = [&](StringRef Identifier) {
1296 const auto &Memory = Data->ModuleMap.lookup(Identifier);
1297 auto &Context = Mod.getContext();
1298 auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);
1303 // The rest of this closure is a workaround for
1304 // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
1305 // we accidentally import wasm custom sections into different modules,
1306 // duplicating them in the final output artifact.
1308 // The issue is worked around here by manually removing the
1309 // `wasm.custom_sections` named metadata node from any imported module. This
1310 // we know isn't used by any optimization pass so there's no need for it to
1313 // Note that the metadata is currently lazily loaded, so we materialize it
1314 // here before looking up if there's metadata inside. The `FunctionImporter`
1315 // will immediately materialize metadata anyway after an import, so this
1316 // shouldn't be a perf hit.
1317 if (Error Err = (*MOrErr)->materializeMetadata()) {
// Propagate the error wrapped in the `Expected` type the loader returns.
1318 Expected<std::unique_ptr<Module>> Ret(std::move(Err));
1322 auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
1323 if (WasmCustomSections)
1324 WasmCustomSections->eraseFromParent();
1328 FunctionImporter Importer(Data->Index, Loader);
1329 Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
1331 LLVMRustSetLastError(toString(Result.takeError()).c_str());
// C-ABI callback invoked once per (importing module, imported module) edge by
// `LLVMRustGetThinLTOModuleImports` below.
1337 extern "C" typedef void (*LLVMRustModuleNameCallback)(void*, // payload
1338 const char*, // importing module name
1339 const char*); // imported module name
1341 // Calls `module_name_callback` for each module import done by ThinLTO.
1342 // The callback is provided with regular null-terminated C strings.
1344 LLVMRustGetThinLTOModuleImports(const LLVMRustThinLTOData *data,
1345 LLVMRustModuleNameCallback module_name_callback,
1346 void* callback_payload) {
1347 for (const auto& importing_module : data->ImportLists) {
// Copy keys into std::strings so `.c_str()` yields NUL-terminated pointers
// that stay valid for the duration of the callback.
1348 const std::string importing_module_id = importing_module.getKey().str();
1349 const auto& imports = importing_module.getValue();
1350 for (const auto& imported_module : imports) {
1351 const std::string imported_module_id = imported_module.getKey().str();
1352 module_name_callback(callback_payload,
1353 importing_module_id.c_str(),
1354 imported_module_id.c_str());
1359 // This struct and various functions are sort of a hack right now, but the
1360 // problem is that we've got in-memory LLVM modules after we generate and
1361 // optimize all codegen-units for one compilation in rustc. To be compatible
1362 // with the LTO support above we need to serialize the modules plus their
1363 // ThinLTO summary into memory.
1365 // This structure is basically an owned version of a serialize module, with
1366 // a ThinLTO summary attached.
// (The struct's `data` field — the serialized bytes written via the
// `raw_string_ostream` in `LLVMRustThinLTOBufferCreate` — is elided from this
// view; see the `Ret->data` / `Buffer->data` uses below.)
1367 struct LLVMRustThinLTOBuffer {
// Serializes a module (plus its ThinLTO summary, via the ThinLTO bitcode
// writer pass) into an owned in-memory buffer. The caller frees the result
// with `LLVMRustThinLTOBufferFree`.
1371 extern "C" LLVMRustThinLTOBuffer*
1372 LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
// `llvm::make_unique` was replaced by `std::make_unique` in LLVM 10.
1373 #if LLVM_VERSION_GE(10, 0)
1374 auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
1376 auto Ret = llvm::make_unique<LLVMRustThinLTOBuffer>();
// Stream the bitcode straight into the buffer's backing string.
1379 raw_string_ostream OS(Ret->data);
1381 legacy::PassManager PM;
1382 PM.add(createWriteThinLTOBitcodePass(OS));
// Ownership passes to the caller.
1386 return Ret.release();
// Frees a buffer produced by `LLVMRustThinLTOBufferCreate` (the `extern "C"`
// return-type line sits just above this chunk).
1390 LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
// Accessor: pointer to the serialized bitcode bytes. Valid only for the
// lifetime of the owning `LLVMRustThinLTOBuffer`.
1394 extern "C" const void*
1395 LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
1396 return Buffer->data.data();
// Accessor: length in bytes of the serialized bitcode; pairs with
// `LLVMRustThinLTOBufferPtr` above.
1400 LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
1401 return Buffer->data.length();
1404 // This is what we used to parse upstream bitcode for actual ThinLTO
1405 // processing. We'll call this once per module optimized through ThinLTO, and
1406 // it'll be called concurrently on many threads.
// On parse failure, reports via `LLVMRustSetLastError` (return of the null
// module is elided from this view).
1407 extern "C" LLVMModuleRef
1408 LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
1411 const char *identifier) {
// Borrow the caller's bytes; no copy is made before parsing.
1412 StringRef Data(data, len);
1413 MemoryBufferRef Buffer(Data, identifier);
// Deduplicate identical debug types across modules parsed into this context.
1414 unwrap(Context)->enableDebugTypeODRUniquing();
1415 Expected<std::unique_ptr<Module>> SrcOrError =
1416 parseBitcodeFile(Buffer, *unwrap(Context));
1418 LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
// Hand ownership of the parsed module to the caller as an LLVMModuleRef.
1421 return wrap(std::move(*SrcOrError).release());
1424 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1425 // the comment in `back/lto.rs` for why this exists.
// NOTE(review): despite the copied comment above, this function *extracts*
// rather than rewrites — it walks the module's compile units and stores up to
// two of them through the out-pointers `A` and `B` (the actual rewrite is
// `LLVMRustThinLTOPatchDICompileUnit` below).
1427 LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
1429 DICompileUnit **B) {
1430 Module *M = unwrap(Mod);
// Fill `*A` first, then `*B` (advance logic is elided from this view).
1431 DICompileUnit **Cur = A;
1432 DICompileUnit **Next = B;
1433 for (DICompileUnit *CU : M->debug_compile_units()) {
1442 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1443 // the comment in `back/lto.rs` for why this exists.
1445 LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
1446 Module *M = unwrap(Mod);
1448 // If the original source module didn't have a `DICompileUnit` then try to
1449 // merge all the existing compile units. If there aren't actually any though
1450 // then there's not much for us to do so return.
1451 if (Unit == nullptr) {
1452 for (DICompileUnit *CU : M->debug_compile_units()) {
1456 if (Unit == nullptr)
1460 // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
1461 // process it recursively. Note that we specifically iterate over instructions
1462 // to ensure we feed everything into it.
1463 DebugInfoFinder Finder;
1464 Finder.processModule(*M);
// Walk every instruction: feed debug locations plus the debug intrinsics
// (`llvm.dbg.value` / `llvm.dbg.declare`) into the finder so no subprogram
// reachable only through instruction-level debug info is missed.
1465 for (Function &F : M->functions()) {
1466 for (auto &FI : F) {
1467 for (Instruction &BI : FI) {
1468 if (auto Loc = BI.getDebugLoc())
1469 Finder.processLocation(*M, Loc);
1470 if (auto DVI = dyn_cast<DbgValueInst>(&BI))
1471 Finder.processValue(*M, DVI);
1472 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1473 Finder.processDeclare(*M, DDI);
1478 // After we've found all our debuginfo, rewrite all subprograms to point to
1479 // the same `DICompileUnit`.
1480 for (auto &F : Finder.subprograms()) {
1481 F->replaceUnit(Unit);
1484 // Erase any other references to other `DICompileUnit` instances, the verifier
1485 // will later ensure that we don't actually have any other stale references to
// Reset `llvm.dbg.cu` so `Unit` is its sole operand.
1487 auto *MD = M->getNamedMetadata("llvm.dbg.cu");
1488 MD->clearOperands();
1489 MD->addOperand(Unit);