6 #include "LLVMWrapper.h"
8 #include "llvm/Analysis/AliasAnalysis.h"
9 #include "llvm/Analysis/TargetLibraryInfo.h"
10 #include "llvm/Analysis/TargetTransformInfo.h"
11 #include "llvm/CodeGen/TargetSubtargetInfo.h"
12 #include "llvm/InitializePasses.h"
13 #include "llvm/IR/AutoUpgrade.h"
14 #include "llvm/IR/AssemblyAnnotationWriter.h"
15 #include "llvm/IR/IntrinsicInst.h"
16 #include "llvm/IR/Verifier.h"
17 #include "llvm/Object/ObjectFile.h"
18 #include "llvm/Object/IRObjectFile.h"
19 #include "llvm/Passes/PassBuilder.h"
20 #include "llvm/Passes/PassPlugin.h"
21 #include "llvm/Passes/StandardInstrumentations.h"
22 #include "llvm/Support/CBindingWrapping.h"
23 #include "llvm/Support/FileSystem.h"
24 #include "llvm/Support/Host.h"
25 #if LLVM_VERSION_LT(14, 0)
26 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/MC/TargetRegistry.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
32 #include "llvm/Transforms/IPO/AlwaysInliner.h"
33 #include "llvm/Transforms/IPO/FunctionImport.h"
34 #include "llvm/Transforms/IPO/Internalize.h"
35 #include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
36 #include "llvm/Transforms/Utils/AddDiscriminators.h"
37 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
38 #include "llvm/LTO/LTO.h"
39 #include "llvm/Bitcode/BitcodeWriter.h"
40 #include "llvm-c/Transforms/PassManagerBuilder.h"
42 #include "llvm/Transforms/Instrumentation.h"
43 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
44 #include "llvm/Support/TimeProfiler.h"
45 #include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
46 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
47 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
48 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
49 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
50 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
51 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
52 #include "llvm/Transforms/Utils.h"
// Opaque FFI handle types following LLVM-C conventions for objects the C API
// does not expose itself.
56 typedef struct LLVMOpaquePass *LLVMPassRef;
57 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
// Generates wrap()/unwrap() helpers converting between the opaque C handles
// above and the underlying C++ types.
59 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
60 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
62 extern "C" void LLVMInitializePasses() {
63 PassRegistry &Registry = *PassRegistry::getPassRegistry();
64 initializeCore(Registry);
65 initializeCodeGen(Registry);
66 initializeScalarOpts(Registry);
67 initializeVectorization(Registry);
68 initializeIPO(Registry);
69 initializeAnalysis(Registry);
70 initializeTransformUtils(Registry);
71 initializeInstCombine(Registry);
72 #if LLVM_VERSION_LT(16, 0)
73 initializeInstrumentation(Registry);
75 initializeTarget(Registry);
78 extern "C" void LLVMTimeTraceProfilerInitialize() {
79 timeTraceProfilerInitialize(
80 /* TimeTraceGranularity */ 0,
81 /* ProcName */ "rustc");
84 extern "C" void LLVMTimeTraceProfilerFinishThread() {
85 timeTraceProfilerFinishThread();
88 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
89 StringRef FN(FileName);
91 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
93 timeTraceProfilerWrite(OS);
94 timeTraceProfilerCleanup();
// Per-target SUBTARGET_<ARCH> helper macros. Each expands to SUBTARGET(<Arch>)
// when the corresponding LLVM target component is compiled in, and (via the
// fallback definition) to nothing otherwise; GEN_SUBTARGETS then stamps out a
// SUBTARGET(...) expansion per enabled target.
// NOTE(review): this chunk looks truncated — the matching #else/#endif lines
// for these groups and the continuation lines of GEN_SUBTARGETS are not
// visible here; confirm against the original file.
97 #ifdef LLVM_COMPONENT_X86
98 #define SUBTARGET_X86 SUBTARGET(X86)
100 #define SUBTARGET_X86
103 #ifdef LLVM_COMPONENT_ARM
104 #define SUBTARGET_ARM SUBTARGET(ARM)
106 #define SUBTARGET_ARM
109 #ifdef LLVM_COMPONENT_AARCH64
110 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
112 #define SUBTARGET_AARCH64
115 #ifdef LLVM_COMPONENT_AVR
116 #define SUBTARGET_AVR SUBTARGET(AVR)
118 #define SUBTARGET_AVR
121 #ifdef LLVM_COMPONENT_M68k
122 #define SUBTARGET_M68K SUBTARGET(M68k)
124 #define SUBTARGET_M68K
127 #ifdef LLVM_COMPONENT_MIPS
128 #define SUBTARGET_MIPS SUBTARGET(Mips)
130 #define SUBTARGET_MIPS
133 #ifdef LLVM_COMPONENT_POWERPC
134 #define SUBTARGET_PPC SUBTARGET(PPC)
136 #define SUBTARGET_PPC
139 #ifdef LLVM_COMPONENT_SYSTEMZ
140 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
142 #define SUBTARGET_SYSTEMZ
145 #ifdef LLVM_COMPONENT_MSP430
146 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
148 #define SUBTARGET_MSP430
151 #ifdef LLVM_COMPONENT_RISCV
152 #define SUBTARGET_RISCV SUBTARGET(RISCV)
154 #define SUBTARGET_RISCV
157 #ifdef LLVM_COMPONENT_SPARC
158 #define SUBTARGET_SPARC SUBTARGET(Sparc)
160 #define SUBTARGET_SPARC
163 #ifdef LLVM_COMPONENT_HEXAGON
164 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
166 #define SUBTARGET_HEXAGON
// SUBTARGET(x) declares the per-target feature and CPU (subtype) tables that
// LLVMRustHasFeature / LLVMRustPrintTargetCPUs consult.
169 #define GEN_SUBTARGETS \
183 #define SUBTARGET(x) \
185 extern const SubtargetFeatureKV x##FeatureKV[]; \
186 extern const SubtargetFeatureKV x##SubTypeKV[]; \
192 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
193 const char *Feature) {
194 TargetMachine *Target = unwrap(TM);
195 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
196 return MCInfo->checkFeatures(std::string("+") + Feature);
199 enum class LLVMRustCodeModel {
208 static Optional<CodeModel::Model> fromRust(LLVMRustCodeModel Model) {
210 case LLVMRustCodeModel::Tiny:
211 return CodeModel::Tiny;
212 case LLVMRustCodeModel::Small:
213 return CodeModel::Small;
214 case LLVMRustCodeModel::Kernel:
215 return CodeModel::Kernel;
216 case LLVMRustCodeModel::Medium:
217 return CodeModel::Medium;
218 case LLVMRustCodeModel::Large:
219 return CodeModel::Large;
220 case LLVMRustCodeModel::None:
223 report_fatal_error("Bad CodeModel.");
227 enum class LLVMRustCodeGenOptLevel {
234 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
236 case LLVMRustCodeGenOptLevel::None:
237 return CodeGenOpt::None;
238 case LLVMRustCodeGenOptLevel::Less:
239 return CodeGenOpt::Less;
240 case LLVMRustCodeGenOptLevel::Default:
241 return CodeGenOpt::Default;
242 case LLVMRustCodeGenOptLevel::Aggressive:
243 return CodeGenOpt::Aggressive;
245 report_fatal_error("Bad CodeGenOptLevel.");
249 enum class LLVMRustPassBuilderOptLevel {
258 #if LLVM_VERSION_LT(14,0)
259 using OptimizationLevel = PassBuilder::OptimizationLevel;
262 static OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
264 case LLVMRustPassBuilderOptLevel::O0:
265 return OptimizationLevel::O0;
266 case LLVMRustPassBuilderOptLevel::O1:
267 return OptimizationLevel::O1;
268 case LLVMRustPassBuilderOptLevel::O2:
269 return OptimizationLevel::O2;
270 case LLVMRustPassBuilderOptLevel::O3:
271 return OptimizationLevel::O3;
272 case LLVMRustPassBuilderOptLevel::Os:
273 return OptimizationLevel::Os;
274 case LLVMRustPassBuilderOptLevel::Oz:
275 return OptimizationLevel::Oz;
277 report_fatal_error("Bad PassBuilderOptLevel.");
281 enum class LLVMRustRelocModel {
290 static Reloc::Model fromRust(LLVMRustRelocModel RustReloc) {
292 case LLVMRustRelocModel::Static:
293 return Reloc::Static;
294 case LLVMRustRelocModel::PIC:
296 case LLVMRustRelocModel::DynamicNoPic:
297 return Reloc::DynamicNoPIC;
298 case LLVMRustRelocModel::ROPI:
300 case LLVMRustRelocModel::RWPI:
302 case LLVMRustRelocModel::ROPIRWPI:
303 return Reloc::ROPI_RWPI;
305 report_fatal_error("Bad RelocModel.");
309 /// getLongestEntryLength - Return the length of the longest entry in the table.
310 template<typename KV>
311 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
313 for (auto &I : Table)
314 MaxLen = std::max(MaxLen, std::strlen(I.Key));
// Prints to stdout the CPU names this target knows about; when the host and
// target architectures match it also advertises "native" together with the
// detected host CPU. NOTE(review): the tail of this function (closing braces)
// is not visible in this chunk — confirm against the original file.
318 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
319 const TargetMachine *Target = unwrap(TM);
320 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
321 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
322 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
323 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
// Column width so the CPU list lines up when printed.
324 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
326 printf("Available CPUs for this target:\n");
327 if (HostArch == TargetArch) {
328 const StringRef HostCPU = sys::getHostCPUName();
329 printf("    %-*s - Select the CPU of the current host (currently %.*s).\n",
330 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
332 for (auto &CPU : CPUTable)
333 printf("    %-*s\n", MaxCPULen, CPU.Key);
337 extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
338 const TargetMachine *Target = unwrap(TM);
339 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
340 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
341 return FeatTable.size();
344 extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef TM, size_t Index,
345 const char** Feature, const char** Desc) {
346 const TargetMachine *Target = unwrap(TM);
347 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
348 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
349 const SubtargetFeatureKV Feat = FeatTable[Index];
// Fallback stubs for LLVM builds without the subtarget-table APIs used above:
// CPU help prints a "not supported" notice and feature enumeration is empty.
// NOTE(review): this chunk is truncated — the closing braces and the
// `return 0;` of the count stub are not visible here; confirm against the
// original file.
356 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
357 printf("Target CPU help is not supported by this LLVM version.\n\n");
360 extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef) {
364 extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef, const char**, const char**) {}
367 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
368 StringRef Name = sys::getHostCPUName();
// Creates a TargetMachine for the given triple/CPU/feature-string and rustc's
// codegen options; returns the new machine, or null (after recording a message
// via LLVMRustSetLastError) when the triple is unknown to this LLVM build.
// NOTE(review): this chunk is truncated — several parameter lines (e.g. the
// DataSections/AsmComments/UseInitArray parameters referenced below), some
// `} else {` lines, and the final return are not visible here; confirm
// against the original file.
373 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
374 const char *TripleStr, const char *CPU, const char *Feature,
375 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocModel RustReloc,
376 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
377 bool FunctionSections,
379 bool UniqueSectionNames,
380 bool TrapUnreachable,
383 bool EmitStackSizeSection,
384 bool RelaxELFRelocations,
386 const char *SplitDwarfFile) {
// Translate the FFI enums into LLVM's native option types.
388 auto OptLevel = fromRust(RustOptLevel);
389 auto RM = fromRust(RustReloc);
390 auto CM = fromRust(RustCM);
393 Triple Trip(Triple::normalize(TripleStr));
394 const llvm::Target *TheTarget =
395 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
396 if (TheTarget == nullptr) {
397 LLVMRustSetLastError(Error.c_str());
401 TargetOptions Options;
403 Options.FloatABIType = FloatABI::Default;
// Soft-float overrides the default float ABI when requested by the caller.
405 Options.FloatABIType = FloatABI::Soft;
407 Options.DataSections = DataSections;
408 Options.FunctionSections = FunctionSections;
409 Options.UniqueSectionNames = UniqueSectionNames;
410 Options.MCOptions.AsmVerbose = AsmComments;
411 Options.MCOptions.PreserveAsmComments = AsmComments;
412 Options.MCOptions.ABIName = ABIStr;
413 if (SplitDwarfFile) {
414 Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
416 Options.RelaxELFRelocations = RelaxELFRelocations;
417 Options.UseInitArray = UseInitArray;
419 if (TrapUnreachable) {
420 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
421 // This limits the extent of possible undefined behavior in some cases, as
422 // it prevents control flow from "falling through" into whatever code
423 // happens to be laid out next in memory.
424 Options.TrapUnreachable = true;
// NOTE(review): presumably guarded by a single-threaded flag on a parameter
// line not visible here — confirm.
428 Options.ThreadModel = ThreadModel::Single;
431 Options.EmitStackSizeSection = EmitStackSizeSection;
433 TargetMachine *TM = TheTarget->createTargetMachine(
434 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
438 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
442 // Unfortunately, the LLVM C API doesn't provide a way to create the
443 // TargetLibraryInfo pass, so we use this method to do so.
444 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
445 bool DisableSimplifyLibCalls) {
446 Triple TargetTriple(unwrap(M)->getTargetTriple());
447 TargetLibraryInfoImpl TLII(TargetTriple);
448 if (DisableSimplifyLibCalls)
449 TLII.disableAllFunctions();
450 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
453 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
454 // Initializing the command-line options more than once is not allowed. So,
455 // check if they've already been initialized. (This could happen if we're
456 // being called from rustpkg, for example). If the arguments change, then
457 // that's just kinda unfortunate.
458 static bool Initialized = false;
462 cl::ParseCommandLineOptions(Argc, Argv);
465 enum class LLVMRustFileType {
470 static CodeGenFileType fromRust(LLVMRustFileType Type) {
472 case LLVMRustFileType::AssemblyFile:
473 return CGFT_AssemblyFile;
474 case LLVMRustFileType::ObjectFile:
475 return CGFT_ObjectFile;
477 report_fatal_error("Bad FileType.");
// Runs codegen via the legacy pass manager and writes the result (assembly or
// object, per RustFileType) to `Path`; when DwoPath is provided, split-DWARF
// output is emitted there as well. Returns Failure (after recording a message
// via LLVMRustSetLastError) when either output file cannot be opened.
// NOTE(review): this chunk is truncated — the EC declaration, the
// `if (DwoPath)` branch structure, the PM->run call and closing braces are
// not visible here; confirm against the original file.
481 extern "C" LLVMRustResult
482 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
483 LLVMModuleRef M, const char *Path, const char *DwoPath,
484 LLVMRustFileType RustFileType) {
485 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
486 auto FileType = fromRust(RustFileType);
488 std::string ErrorInfo;
490 raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
492 ErrorInfo = EC.message();
493 if (ErrorInfo != "") {
494 LLVMRustSetLastError(ErrorInfo.c_str());
495 return LLVMRustResult::Failure;
// Buffered wrapper; addPassesToEmitFile keeps a pointer to this stream.
498 buffer_ostream BOS(OS);
500 raw_fd_ostream DOS(DwoPath, EC, sys::fs::OF_None);
503 ErrorInfo = EC.message();
504 if (ErrorInfo != "") {
505 LLVMRustSetLastError(ErrorInfo.c_str());
506 return LLVMRustResult::Failure;
508 buffer_ostream DBOS(DOS);
509 unwrap(Target)->addPassesToEmitFile(*PM, BOS, &DBOS, FileType, false);
512 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
516 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
517 // stream (OS), so the only real safe place to delete this is here? Don't we
518 // wish this was written in Rust?
519 LLVMDisposePassManager(PMR);
520 return LLVMRustResult::Success;
// Callback types the Rust side supplies so LLVM pass execution can be
// recorded in rustc's self-profiler (see LLVMSelfProfileInitializeCallbacks).
523 extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
524 const char*, // pass name
525 const char*); // IR name
526 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
528 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
529 if (any_isa<const Module *>(WrappedIr))
530 return any_cast<const Module *>(WrappedIr)->getName().str();
531 if (any_isa<const Function *>(WrappedIr))
532 return any_cast<const Function *>(WrappedIr)->getName().str();
533 if (any_isa<const Loop *>(WrappedIr))
534 return any_cast<const Loop *>(WrappedIr)->getName().str();
535 if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
536 return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
// Wires rustc's self-profiler into LLVM's pass instrumentation: the "before"
// callback fires ahead of each (non-skipped) pass and each analysis, the
// "after" callback when a pass/analysis finishes or is invalidated. Pass and
// IR names are materialized as std::strings so the C callback receives
// NUL-terminated data.
541 void LLVMSelfProfileInitializeCallbacks(
542 PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
543 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
544 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
545 PIC.registerBeforeNonSkippedPassCallback([LlvmSelfProfiler, BeforePassCallback](
546 StringRef Pass, llvm::Any Ir) {
547 std::string PassName = Pass.str();
548 std::string IrName = LLVMRustwrappedIrGetName(Ir);
549 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
552 PIC.registerAfterPassCallback(
553 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any IR,
554 const PreservedAnalyses &Preserved) {
555 AfterPassCallback(LlvmSelfProfiler);
558 PIC.registerAfterPassInvalidatedCallback(
559 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
560 AfterPassCallback(LlvmSelfProfiler);
563 PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
564 StringRef Pass, llvm::Any Ir) {
565 std::string PassName = Pass.str();
566 std::string IrName = LLVMRustwrappedIrGetName(Ir);
567 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
570 PIC.registerAfterAnalysisCallback(
571 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
572 AfterPassCallback(LlvmSelfProfiler);
// Which compilation stage the optimization pipeline is being built for.
// The enumerators are dictated by the pipeline-construction switch further
// below. NOTE(review): order must match the Rust-side enum — confirm.
enum class LLVMRustOptStage {
  PreLinkNoLTO,
  PreLinkThinLTO,
  PreLinkFatLTO,
  ThinLTO,
  FatLTO,
};
// Sanitizer configuration passed over FFI.
// NOTE(review): this struct is shared with Rust by layout — field order and
// types must match the Rust-side definition exactly; confirm before changing.
struct LLVMRustSanitizerOptions {
  bool SanitizeAddress;
  bool SanitizeAddressRecover;
  bool SanitizeMemory;
  bool SanitizeMemoryRecover;
  // Track-origins level for MemorySanitizer (0 = off).
  int SanitizeMemoryTrackOrigins;
  bool SanitizeThread;
  bool SanitizeHWAddress;
  bool SanitizeHWAddressRecover;
};
// Entry point that runs the new-pass-manager optimization pipeline over one
// module, as configured by rustc: opt level, LTO stage, sanitizers, PGO,
// coverage instrumentation, pass plugins and extra textual pass pipelines.
// NOTE(review): this chunk is truncated — the line carrying the function's
// name and many closing braces/`#else`/`#endif` lines are not visible here;
// confirm against the original file.
595 extern "C" LLVMRustResult
597 LLVMModuleRef ModuleRef,
598 LLVMTargetMachineRef TMRef,
599 LLVMRustPassBuilderOptLevel OptLevelRust,
600 LLVMRustOptStage OptStage,
601 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
602 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
603 bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
604 LLVMRustSanitizerOptions *SanitizerOptions,
605 const char *PGOGenPath, const char *PGOUsePath,
606 bool InstrumentCoverage, const char *InstrProfileOutput,
608 const char *PGOSampleUsePath, bool DebugInfoForProfiling,
609 void* LlvmSelfProfiler,
610 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
611 LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
612 const char *ExtraPasses, size_t ExtraPassesLen,
613 const char *LLVMPlugins, size_t LLVMPluginsLen) {
614 Module *TheModule = unwrap(ModuleRef);
615 TargetMachine *TM = unwrap(TMRef);
616 OptimizationLevel OptLevel = fromRust(OptLevelRust);
// Translate rustc's tuning flags into LLVM's PipelineTuningOptions.
619 PipelineTuningOptions PTO;
620 PTO.LoopUnrolling = UnrollLoops;
621 PTO.LoopInterleaving = UnrollLoops;
622 PTO.LoopVectorization = LoopVectorize;
623 PTO.SLPVectorization = SLPVectorize;
624 PTO.MergeFunctions = MergeFunctions;
626 // FIXME: We may want to expose this as an option.
627 bool DebugPassManager = false;
629 PassInstrumentationCallbacks PIC;
630 StandardInstrumentations SI(DebugPassManager);
631 SI.registerCallbacks(PIC);
// Hook rustc's self-profiler into pass instrumentation when requested.
633 if (LlvmSelfProfiler){
634 LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
// At most one PGO mode is active at a time; the asserts below enforce that.
637 Optional<PGOOptions> PGOOpt;
639 assert(!PGOUsePath && !PGOSampleUsePath);
640 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr,
641 PGOOptions::NoCSAction, DebugInfoForProfiling);
642 } else if (PGOUsePath) {
643 assert(!PGOSampleUsePath);
644 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse,
645 PGOOptions::NoCSAction, DebugInfoForProfiling);
646 } else if (PGOSampleUsePath) {
647 PGOOpt = PGOOptions(PGOSampleUsePath, "", "", PGOOptions::SampleUse,
648 PGOOptions::NoCSAction, DebugInfoForProfiling);
649 } else if (DebugInfoForProfiling) {
650 PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
651 PGOOptions::NoCSAction, DebugInfoForProfiling);
// Standard new-PM setup: one PassBuilder plus the four analysis managers,
// cross-registered so analyses can query each other.
654 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
655 LoopAnalysisManager LAM;
656 FunctionAnalysisManager FAM;
657 CGSCCAnalysisManager CGAM;
658 ModuleAnalysisManager MAM;
660 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
662 Triple TargetTriple(TheModule->getTargetTriple());
663 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
664 if (DisableSimplifyLibCalls)
665 TLII->disableAllFunctions();
666 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
668 PB.registerModuleAnalyses(MAM);
669 PB.registerCGSCCAnalyses(CGAM);
670 PB.registerFunctionAnalyses(FAM);
671 PB.registerLoopAnalyses(LAM);
672 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
674 // We manually collect pipeline callbacks so we can apply them at O0, where the
675 // PassBuilder does not create a pipeline.
676 std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
677 PipelineStartEPCallbacks;
678 std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
679 OptimizerLastEPCallbacks;
682 PipelineStartEPCallbacks.push_back(
683 [VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
684 MPM.addPass(VerifierPass());
689 if (InstrumentGCOV) {
690 PipelineStartEPCallbacks.push_back(
691 [](ModulePassManager &MPM, OptimizationLevel Level) {
692 MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
697 if (InstrumentCoverage) {
698 PipelineStartEPCallbacks.push_back(
699 [InstrProfileOutput](ModulePassManager &MPM, OptimizationLevel Level) {
700 InstrProfOptions Options;
701 if (InstrProfileOutput) {
702 Options.InstrProfileOutput = InstrProfileOutput;
704 MPM.addPass(InstrProfiling(Options, false));
// Sanitizer passes are scheduled at the end of the optimization pipeline;
// each branch below handles the pass-name/constructor churn across LLVM
// versions.
709 if (SanitizerOptions) {
710 if (SanitizerOptions->SanitizeMemory) {
711 #if LLVM_VERSION_GE(14, 0)
712 MemorySanitizerOptions Options(
713 SanitizerOptions->SanitizeMemoryTrackOrigins,
714 SanitizerOptions->SanitizeMemoryRecover,
715 /*CompileKernel=*/false,
716 /*EagerChecks=*/true);
718 MemorySanitizerOptions Options(
719 SanitizerOptions->SanitizeMemoryTrackOrigins,
720 SanitizerOptions->SanitizeMemoryRecover,
721 /*CompileKernel=*/false);
723 OptimizerLastEPCallbacks.push_back(
724 [Options](ModulePassManager &MPM, OptimizationLevel Level) {
725 #if LLVM_VERSION_GE(14, 0) && LLVM_VERSION_LT(16, 0)
726 MPM.addPass(ModuleMemorySanitizerPass(Options));
728 MPM.addPass(MemorySanitizerPass(Options));
730 #if LLVM_VERSION_LT(16, 0)
731 MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass(Options)));
737 if (SanitizerOptions->SanitizeThread) {
738 OptimizerLastEPCallbacks.push_back(
739 [](ModulePassManager &MPM, OptimizationLevel Level) {
740 #if LLVM_VERSION_GE(14, 0)
741 MPM.addPass(ModuleThreadSanitizerPass());
743 MPM.addPass(ThreadSanitizerPass());
745 MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
750 if (SanitizerOptions->SanitizeAddress) {
751 OptimizerLastEPCallbacks.push_back(
752 [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
753 #if LLVM_VERSION_LT(15, 0)
754 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
756 #if LLVM_VERSION_GE(14, 0)
757 AddressSanitizerOptions opts = AddressSanitizerOptions{
758 /*CompileKernel=*/false,
759 SanitizerOptions->SanitizeAddressRecover,
760 /*UseAfterScope=*/true,
761 AsanDetectStackUseAfterReturnMode::Runtime,
763 #if LLVM_VERSION_LT(16, 0)
764 MPM.addPass(ModuleAddressSanitizerPass(opts));
766 MPM.addPass(AddressSanitizerPass(opts));
769 MPM.addPass(ModuleAddressSanitizerPass(
770 /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
771 MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
772 /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover,
773 /*UseAfterScope=*/true)));
778 if (SanitizerOptions->SanitizeHWAddress) {
779 OptimizerLastEPCallbacks.push_back(
780 [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
781 #if LLVM_VERSION_GE(14, 0)
782 HWAddressSanitizerOptions opts(
783 /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover,
784 /*DisableOptimization=*/false);
785 MPM.addPass(HWAddressSanitizerPass(opts));
787 MPM.addPass(HWAddressSanitizerPass(
788 /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
// Load user-specified pass plugins (comma-separated paths) and let each one
// register its PassBuilder callbacks.
795 if (LLVMPluginsLen) {
796 auto PluginsStr = StringRef(LLVMPlugins, LLVMPluginsLen);
797 SmallVector<StringRef> Plugins;
798 PluginsStr.split(Plugins, ',', -1, false);
799 for (auto PluginPath: Plugins) {
800 auto Plugin = PassPlugin::Load(PluginPath.str());
802 LLVMRustSetLastError(("Failed to load pass plugin" + PluginPath.str()).c_str());
805 Plugin->registerPassBuilderCallbacks(PB);
// Build the pipeline appropriate for the requested optimization level and
// LTO stage.
809 ModulePassManager MPM;
810 bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
811 if (!NoPrepopulatePasses) {
812 // The pre-link pipelines don't support O0 and require using buildO0DefaultPipeline() instead.
813 // At the same time, the LTO pipelines do support O0 and using them is required.
814 bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || OptStage == LLVMRustOptStage::FatLTO;
815 if (OptLevel == OptimizationLevel::O0 && !IsLTO) {
816 for (const auto &C : PipelineStartEPCallbacks)
817 PB.registerPipelineStartEPCallback(C);
818 for (const auto &C : OptimizerLastEPCallbacks)
819 PB.registerOptimizerLastEPCallback(C);
821 // Pass false as we manually schedule ThinLTOBufferPasses below.
822 MPM = PB.buildO0DefaultPipeline(OptLevel, /* PreLinkLTO */ false);
824 for (const auto &C : PipelineStartEPCallbacks)
825 PB.registerPipelineStartEPCallback(C);
826 if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
827 for (const auto &C : OptimizerLastEPCallbacks)
828 PB.registerOptimizerLastEPCallback(C);
832 case LLVMRustOptStage::PreLinkNoLTO:
833 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
835 case LLVMRustOptStage::PreLinkThinLTO:
836 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel);
837 // The ThinLTOPreLink pipeline already includes ThinLTOBuffer passes. However, callback
838 // passes may still run afterwards. This means we need to run the buffer passes again.
839 // FIXME: In LLVM 13, the ThinLTOPreLink pipeline also runs OptimizerLastEPCallbacks
840 // before the RequiredLTOPreLinkPasses, in which case we can remove these hacks.
841 if (OptimizerLastEPCallbacks.empty())
842 NeedThinLTOBufferPasses = false;
843 for (const auto &C : OptimizerLastEPCallbacks)
846 case LLVMRustOptStage::PreLinkFatLTO:
847 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel);
848 NeedThinLTOBufferPasses = false;
850 case LLVMRustOptStage::ThinLTO:
851 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
852 // It only seems to be needed for C++ specific optimizations.
853 MPM = PB.buildThinLTODefaultPipeline(OptLevel, nullptr);
855 case LLVMRustOptStage::FatLTO:
856 MPM = PB.buildLTODefaultPipeline(OptLevel, nullptr);
861 // We're not building any of the default pipelines but we still want to
862 // add the verifier, instrumentation, etc passes if they were requested
863 for (const auto &C : PipelineStartEPCallbacks)
865 for (const auto &C : OptimizerLastEPCallbacks)
// Append any extra textual pass pipeline supplied via -C passes / -Z flags;
// a parse failure aborts the whole optimization run.
869 if (ExtraPassesLen) {
870 if (auto Err = PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
871 std::string ErrMsg = toString(std::move(Err));
872 LLVMRustSetLastError(ErrMsg.c_str());
873 return LLVMRustResult::Failure;
877 if (NeedThinLTOBufferPasses) {
878 MPM.addPass(CanonicalizeAliasesPass());
879 MPM.addPass(NameAnonGlobalPass());
882 // Upgrade all calls to old intrinsics first.
883 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
884 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
886 MPM.run(*TheModule, MAM);
887 return LLVMRustResult::Success;
890 // Callback to demangle function name
// Implemented on the Rust side; arguments are:
892 // * name to be demangled
// * name length, output buffer,
895 // * output buffer len
896 // Returns len of demangled string, or 0 if demangle failed.
897 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
// AssemblyAnnotationWriter that prefixes functions and call/invoke sites in
// printed IR with their demangled Rust names, using a demangler callback
// supplied by the Rust side. NOTE(review): this chunk is truncated — member
// declarations (e.g. the DemangleFn field), early-return branches and closing
// braces are not visible here; confirm against the original file.
902 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
// Reusable output buffer for demangled names.
904 std::vector<char> Buf;
907 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
909 // Return empty string if demangle failed
910 // or if name does not need to be demangled
911 StringRef CallDemangle(StringRef name) {
916 if (Buf.size() < name.size() * 2) {
917 // Demangled name usually shorter than mangled,
918 // but allocate twice as much memory just in case
919 Buf.resize(name.size() * 2);
922 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
928 auto Demangled = StringRef(Buf.data(), R);
929 if (Demangled == name) {
930 // Do not print anything if demangled name is equal to mangled.
// Emits "; <demangled>" above each function definition.
937 void emitFunctionAnnot(const Function *F,
938 formatted_raw_ostream &OS) override {
939 StringRef Demangled = CallDemangle(F->getName());
940 if (Demangled.empty()) {
944 OS << "; " << Demangled << "\n";
// Emits the demangled callee name above call/invoke instructions.
947 void emitInstructionAnnot(const Instruction *I,
948 formatted_raw_ostream &OS) override {
951 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
953 Value = CI->getCalledOperand();
954 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
956 Value = II->getCalledOperand();
958 // Could demangle more operations, e. g.
959 // `store %place, @function`.
963 if (!Value->hasName()) {
967 StringRef Demangled = CallDemangle(Value->getName());
968 if (Demangled.empty()) {
972 OS << "; " << Name << " " << Demangled << "\n";
978 extern "C" LLVMRustResult
979 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
980 std::string ErrorInfo;
982 raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
984 ErrorInfo = EC.message();
985 if (ErrorInfo != "") {
986 LLVMRustSetLastError(ErrorInfo.c_str());
987 return LLVMRustResult::Failure;
990 RustAssemblyAnnotationWriter AAW(Demangle);
991 formatted_raw_ostream FOS(OS);
992 unwrap(M)->print(FOS, &AAW);
994 return LLVMRustResult::Success;
997 extern "C" void LLVMRustPrintPasses() {
998 LLVMInitializePasses();
999 struct MyListener : PassRegistrationListener {
1000 void passEnumerate(const PassInfo *Info) {
1001 StringRef PassArg = Info->getPassArgument();
1002 StringRef PassName = Info->getPassName();
1003 if (!PassArg.empty()) {
1004 // These unsigned->signed casts could theoretically overflow, but
1005 // realistically never will (and even if, the result is implementation
1006 // defined rather plain UB).
1007 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
1008 (int)PassName.size(), PassName.data());
1013 PassRegistry *PR = PassRegistry::getPassRegistry();
1014 PR->enumerateWith(&Listener);
1017 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
1019 auto PreserveFunctions = [=](const GlobalValue &GV) {
1020 for (size_t I = 0; I < Len; I++) {
1021 if (GV.getName() == Symbols[I]) {
1028 internalizeModule(*unwrap(M), PreserveFunctions);
1032 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1033 LLVMTargetMachineRef TMR) {
1034 TargetMachine *Target = unwrap(TMR);
1035 unwrap(Module)->setDataLayout(Target->createDataLayout());
1038 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1039 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
1042 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1043 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1046 extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
1047 LLVMRustCodeModel Model) {
1048 auto CM = fromRust(Model);
1051 unwrap(M)->setCodeModel(*CM);
1054 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1055 // right now. This ThinLTO support is only enabled on "recent ish" versions of
1056 // LLVM, and otherwise it's just blanket rejected from other compilers.
1058 // Most of this implementation is straight copied from LLVM. At the time of
1059 // this writing it wasn't *quite* suitable to reuse more code from upstream
1060 // for our purposes, but we should strive to upstream this support once it's
1061 // ready to go! I figure we may want a bit of testing locally first before
1062 // sending this upstream to LLVM. I hear though they're quite eager to receive
1063 // feedback like this!
1065 // If you're reading this code and wondering "what in the world" or you're
1066 // working "good lord, my LLVM upgrade is *still* failing due to these bindings"
1067 // then fear not! (ok maybe fear a little). All code here is mostly based
1068 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1070 // You'll find that the general layout here roughly corresponds to the `run`
1071 // method in that file as well as `ProcessThinLTOModule`. Functions are
1072 // specifically commented below as well, but if you're updating this code
1073 // or otherwise trying to understand it, the LLVM source will be useful in
1074 // interpreting the mysteries within.
1076 // Otherwise I'll apologize in advance, it probably requires a relatively
1077 // significant investment on your part to "truly understand" what's going on
1078 // here. Not saying I do myself, but it took me awhile staring at LLVM's source
1079 // and various online resources about ThinLTO to make heads or tails of all
1082 // This is a shared data structure which *must* be threadsafe to share
1083 // read-only amongst threads. This also corresponds basically to the arguments
1084 // of the `ProcessThinLTOModule` function in the LLVM source.
1085 struct LLVMRustThinLTOData {
1086 // The combined index that is the global analysis over all modules we're
1087 // performing ThinLTO for. This is mostly managed by LLVM.
1088 ModuleSummaryIndex Index;
1090 // All modules we may look at, stored as in-memory serialized versions. This
1091 // is later used when inlining to ensure we can extract any module to inline
1093 StringMap<MemoryBufferRef> ModuleMap;
1095 // A set that we manage of everything we *don't* want internalized. Note that
1096 // this includes all transitive references right now as well, but it may not
1098 DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
1100 // Not 100% sure what these are, but they impact what's internalized and
1101 // what's inlined across modules, I believe.
1102 StringMap<FunctionImporter::ImportMapTy> ImportLists;
1103 StringMap<FunctionImporter::ExportSetTy> ExportLists;
1104 StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
// Per-module resolution of linkage decisions for ODR (linkonce/weak) symbols.
1105 StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
// NOTE(review): HaveGVs=false — the index is built without backing IR
// GlobalValues; confirm against LLVM's ModuleSummaryIndex documentation.
1107 LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
// Just an argument to the `LLVMRustCreateThinLTOData` function below.
struct LLVMRustThinLTOModule {
  // Unique module name; used as the key into
  // `LLVMRustThinLTOData::ModuleMap`.
  const char *identifier;
1117 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
1119 static const GlobalValueSummary *
1120 getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
1121 auto StrongDefForLinker = llvm::find_if(
1122 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1123 auto Linkage = Summary->linkage();
1124 return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
1125 !GlobalValue::isWeakForLinker(Linkage);
1127 if (StrongDefForLinker != GVSummaryList.end())
1128 return StrongDefForLinker->get();
1130 auto FirstDefForLinker = llvm::find_if(
1131 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1132 auto Linkage = Summary->linkage();
1133 return !GlobalValue::isAvailableExternallyLinkage(Linkage);
1135 if (FirstDefForLinker == GVSummaryList.end())
1137 return FirstDefForLinker->get();
// The main entry point for creating the global ThinLTO analysis. The structure
// here is basically the same as before threads are spawned in the `run`
// function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
//
// Returns an owned `LLVMRustThinLTOData` on success; on failure, reports the
// error via `LLVMRustSetLastError`.
extern "C" LLVMRustThinLTOData*
LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
                          const char **preserved_symbols,
  auto Ret = std::make_unique<LLVMRustThinLTOData>();

  // Load each module's summary and merge it into one combined index.
  for (int i = 0; i < num_modules; i++) {
    auto module = &modules[i];
    StringRef buffer(module->data, module->len);
    MemoryBufferRef mem_buffer(buffer, module->identifier);

    // Keep a handle to each serialized module so later import steps can
    // lazily re-load it (see the `Loader` closure in
    // `LLVMRustPrepareThinLTOImport`).
    Ret->ModuleMap[module->identifier] = mem_buffer;

    // `i` becomes the module's numeric id within the combined index.
    if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
      LLVMRustSetLastError(toString(std::move(Err)).c_str());

  // Collect for each module the list of function it defines (GUID -> Summary)
  Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);

  // Convert the preserved symbols set from string to GUID, this is then needed
  // for internalization.
  for (int i = 0; i < num_symbols; i++) {
    auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
    Ret->GUIDPreservedSymbols.insert(GUID);

  // Collect the import/export lists for all modules from the call-graph in the
  // combined index.
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`.
  auto deadIsPrevailing = [&](GlobalValue::GUID G) {
    return PrevailingType::Unknown;
  // We don't have a complete picture in our use of ThinLTO, just our immediate
  // crate, so we need `ImportEnabled = false` to limit internalization.
  // Otherwise, we sometimes lose `static` values -- see #60184.
  computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
                                  deadIsPrevailing, /* ImportEnabled = */ false);
  ComputeCrossModuleImport(
    Ret->ModuleToDefinedGVSummaries,

  // Resolve LinkOnce/Weak symbols; this has to be computed early because it
  // impacts the caching.
  //
  // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
  // being lifted from `lib/LTO/LTO.cpp` as well.
  DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
  for (auto &I : Ret->Index) {
    // Only symbols with multiple candidate definitions need a prevailing
    // copy recorded; unique definitions trivially prevail.
    if (I.second.SummaryList.size() > 1)
      PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
  auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
    const auto &Prevailing = PrevailingCopy.find(GUID);
    if (Prevailing == PrevailingCopy.end())
    return Prevailing->second == S;
  auto recordNewLinkage = [&](StringRef ModuleIdentifier,
                              GlobalValue::GUID GUID,
                              GlobalValue::LinkageTypes NewLinkage) {
    Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;

  // Uses FromPrevailing visibility scheme which works for many binary
  // formats. We probably could and should use ELF visibility scheme for many of
  // our targets, however.
  thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing, recordNewLinkage,
                                  Ret->GUIDPreservedSymbols);

  // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
  // callback below. This callback below will dictate the linkage for all
  // summaries in the index, and we basically just only want to ensure that dead
  // symbols are internalized. Otherwise everything that's already external
  // linkage will stay as external, and internal will stay as internal.
  std::set<GlobalValue::GUID> ExportedGUIDs;
  for (auto &List : Ret->Index) {
    for (auto &GVS: List.second.SummaryList) {
      if (GlobalValue::isLocalLinkage(GVS->linkage()))
      auto GUID = GVS->getOriginalName();
      // Only live (non-dead) symbols stay exported.
      if (GVS->flags().Live)
        ExportedGUIDs.insert(GUID);
  auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
    const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
    return (ExportList != Ret->ExportLists.end() &&
            ExportList->second.count(VI)) ||
           ExportedGUIDs.count(VI.getGUID());
  thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);

  // Ownership transfers to the Rust caller; paired with
  // `LLVMRustFreeThinLTOData`.
  return Ret.release();
// Destroys the shared ThinLTO analysis created by `LLVMRustCreateThinLTOData`,
// releasing the combined index and the per-module lists it owns.
LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
1253 // Below are the various passes that happen *per module* when doing ThinLTO.
1255 // In other words, these are the functions that are all run concurrently
1256 // with one another, one per module. The passes here correspond to the analysis
1257 // passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
1258 // `ProcessThinLTOModule` function. Here they're split up into separate steps
1259 // so rustc can save off the intermediate bytecode between each step.
1262 clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) {
1263 // When linking an ELF shared object, dso_local should be dropped. We
1264 // conservatively do this for -fpic.
1265 bool ClearDSOLocalOnDeclarations =
1266 TM.getTargetTriple().isOSBinFormatELF() &&
1267 TM.getRelocationModel() != Reloc::Static &&
1268 Mod.getPIELevel() == PIELevel::Default;
1269 return ClearDSOLocalOnDeclarations;
1273 LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
1274 LLVMTargetMachineRef TM) {
1275 Module &Mod = *unwrap(M);
1276 TargetMachine &Target = *unwrap(TM);
1278 bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
1279 bool error = renameModuleForThinLTO(Mod, Data->Index, ClearDSOLocal);
1282 LLVMRustSetLastError("renameModuleForThinLTO failed");
1289 LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1290 Module &Mod = *unwrap(M);
1291 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1292 #if LLVM_VERSION_GE(14, 0)
1293 thinLTOFinalizeInModule(Mod, DefinedGlobals, /*PropagateAttrs=*/true);
1295 thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
1301 LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1302 Module &Mod = *unwrap(M);
1303 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1304 thinLTOInternalizeModule(Mod, DefinedGlobals);
// Import functions from other modules into `Mod` according to the import list
// computed for it in `Data`. Runs once per module, possibly concurrently on
// many threads.
LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
                             LLVMTargetMachineRef TM) {
  Module &Mod = *unwrap(M);
  TargetMachine &Target = *unwrap(TM);

  const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
  // Lazily re-parses a source module (by identifier) out of the serialized
  // `ModuleMap` so the `FunctionImporter` can pull definitions from it.
  auto Loader = [&](StringRef Identifier) {
    const auto &Memory = Data->ModuleMap.lookup(Identifier);
    auto &Context = Mod.getContext();
    auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);

    // The rest of this closure is a workaround for
    // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
    // we accidentally import wasm custom sections into different modules,
    // duplicating them in the final output artifact.
    //
    // The issue is worked around here by manually removing the
    // `wasm.custom_sections` named metadata node from any imported module. This
    // we know isn't used by any optimization pass so there's no need for it to
    // stick around.
    //
    // Note that the metadata is currently lazily loaded, so we materialize it
    // here before looking up if there's metadata inside. The `FunctionImporter`
    // will immediately materialize metadata anyway after an import, so this
    // shouldn't be a perf hit.
    if (Error Err = (*MOrErr)->materializeMetadata()) {
      Expected<std::unique_ptr<Module>> Ret(std::move(Err));

    auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
    if (WasmCustomSections)
      WasmCustomSections->eraseFromParent();

  bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
  FunctionImporter Importer(Data->Index, Loader, ClearDSOLocal);
  Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
    LLVMRustSetLastError(toString(Result.takeError()).c_str());
// This struct and various functions are sort of a hack right now, but the
// problem is that we've got in-memory LLVM modules after we generate and
// optimize all codegen-units for one compilation in rustc. To be compatible
// with the LTO support above we need to serialize the modules plus their
// ThinLTO summary into memory.
//
// This structure is basically an owned version of a serialized module, with
// a ThinLTO summary attached.
struct LLVMRustThinLTOBuffer {
// Serialize the module `M` into a freshly-allocated in-memory buffer. When
// `is_thin` is set the ThinLTO summary is computed and embedded by running
// `ThinLTOBitcodeWriterPass`; otherwise plain bitcode is written.
extern "C" LLVMRustThinLTOBuffer*
LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin) {
  auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
    // Stream the serialized module straight into the buffer's owned string.
    raw_string_ostream OS(Ret->data);
      // Set up the new-pass-manager analysis managers required to run the
      // ThinLTO bitcode writer pass below.
      LoopAnalysisManager LAM;
      FunctionAnalysisManager FAM;
      CGSCCAnalysisManager CGAM;
      ModuleAnalysisManager MAM;
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
      ModulePassManager MPM;
      // `nullptr`: no separate ThinLink-only output stream is requested.
      MPM.addPass(ThinLTOBitcodeWriterPass(OS, nullptr));
      MPM.run(*unwrap(M), MAM);
      // Non-thin path: emit ordinary bitcode without a summary.
      WriteBitcodeToFile(*unwrap(M), OS);
  // Ownership transfers to the caller; paired with
  // `LLVMRustThinLTOBufferFree`.
  return Ret.release();
// Frees a serialized-module buffer allocated by `LLVMRustThinLTOBufferCreate`.
LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
1403 extern "C" const void*
1404 LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
1405 return Buffer->data.data();
1409 LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
1410 return Buffer->data.length();
// This is what we used to parse upstream bitcode for actual ThinLTO
// processing. We'll call this once per module optimized through ThinLTO, and
// it'll be called concurrently on many threads.
extern "C" LLVMModuleRef
LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
                           const char *identifier) {
  StringRef Data(data, len);
  MemoryBufferRef Buffer(Data, identifier);
  // ODR-unique debug types so merging modules into one context doesn't
  // duplicate identical type metadata.
  unwrap(Context)->enableDebugTypeODRUniquing();
  Expected<std::unique_ptr<Module>> SrcOrError =
      parseBitcodeFile(Buffer, *unwrap(Context));
    // Parse failed: record the error for the Rust side to pick up.
    LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
  // Hand the parsed module's ownership across the C ABI boundary.
  return wrap(std::move(*SrcOrError).release());
// Find the bitcode section in the object file data and return it as a slice.
// Fail if the bitcode section is present but empty.
//
// On success, the return value is the pointer to the start of the slice and
// `out_len` is filled with the (non-zero) length. On failure, the return value
// is `nullptr` and `out_len` is set to zero.
extern "C" const char*
LLVMRustGetBitcodeSliceFromObjectData(const char *data,
  StringRef Data(data, len);
  MemoryBufferRef Buffer(Data, ""); // The id is unused.

  Expected<MemoryBufferRef> BitcodeOrError =
      object::IRObjectFile::findBitcodeInMemBuffer(Buffer);
  if (!BitcodeOrError) {
    // No (usable) bitcode section: surface the error to the Rust side.
    LLVMRustSetLastError(toString(BitcodeOrError.takeError()).c_str());

  *out_len = BitcodeOrError->getBufferSize();
  return BitcodeOrError->getBufferStart();
// Fetch the `DICompileUnit`s out of a module for the caller, writing the
// first unit found through `*A` and a second (if any) through `*B`. See
// the comment in `back/lto.rs` for why this exists.
LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
                                DICompileUnit **B) {
  Module *M = unwrap(Mod);
  // Cursor over the two output slots: `Cur` is the next slot to fill,
  // `Next` the one after it.
  DICompileUnit **Cur = A;
  DICompileUnit **Next = B;
  for (DICompileUnit *CU : M->debug_compile_units()) {
1477 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1478 // the comment in `back/lto.rs` for why this exists.
1480 LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
1481 Module *M = unwrap(Mod);
1483 // If the original source module didn't have a `DICompileUnit` then try to
1484 // merge all the existing compile units. If there aren't actually any though
1485 // then there's not much for us to do so return.
1486 if (Unit == nullptr) {
1487 for (DICompileUnit *CU : M->debug_compile_units()) {
1491 if (Unit == nullptr)
1495 // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
1496 // process it recursively. Note that we used to specifically iterate over
1497 // instructions to ensure we feed everything into it, but `processModule`
1498 // started doing this the same way in LLVM 7 (commit d769eb36ab2b8).
1499 DebugInfoFinder Finder;
1500 Finder.processModule(*M);
1502 // After we've found all our debuginfo, rewrite all subprograms to point to
1503 // the same `DICompileUnit`.
1504 for (auto &F : Finder.subprograms()) {
1505 F->replaceUnit(Unit);
1508 // Erase any other references to other `DICompileUnit` instances, the verifier
1509 // will later ensure that we don't actually have any other stale references to
1511 auto *MD = M->getNamedMetadata("llvm.dbg.cu");
1512 MD->clearOperands();
1513 MD->addOperand(Unit);
// Computes the LTO cache key for the provided 'ModId' in the given 'Data',
// storing the result in 'KeyOut'.
// Currently, this cache key is a SHA-1 hash of anything that could affect
// the result of optimizing this module (e.g. module imports, exports, liveness
// of access globals, etc).
// The precise details are determined by LLVM in `computeLTOCacheKey`, which is
// used during the normal linker-plugin incremental thin-LTO process.
LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThinLTOData *Data) {
  SmallString<40> Key;
  // Default config: rustc drives optimization itself, so only the
  // module-specific inputs below vary the key.
  llvm::lto::Config conf;
  const auto &ImportList = Data->ImportLists.lookup(ModId);
  const auto &ExportList = Data->ExportLists.lookup(ModId);
  const auto &ResolvedODR = Data->ResolvedODR.lookup(ModId);
  const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(ModId);
  std::set<GlobalValue::GUID> CfiFunctionDefs;
  std::set<GlobalValue::GUID> CfiFunctionDecls;

  // Based on the 'InProcessThinBackend' constructor in LLVM: hash the CFI
  // function names after stripping the LLVM mangling-escape prefix.
  for (auto &Name : Data->Index.cfiFunctionDefs())
    CfiFunctionDefs.insert(
        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
  for (auto &Name : Data->Index.cfiFunctionDecls())
    CfiFunctionDecls.insert(
        GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));

  llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId,
      ImportList, ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs, CfiFunctionDecls

  // Copy the computed key into the Rust-owned output string.
  LLVMRustStringWriteImpl(KeyOut, Key.c_str(), Key.size());