1 //===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of AddressSanitizer, an address sanity checker.
11 // Details of the algorithm:
12 // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
13 //
14 //===----------------------------------------------------------------------===//
16 #include "llvm/Transforms/Instrumentation.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DenseSet.h"
20 #include "llvm/ADT/DepthFirstIterator.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/IR/CallSite.h"
28 #include "llvm/IR/DIBuilder.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/InlineAsm.h"
33 #include "llvm/IR/InstVisitor.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/LLVMContext.h"
36 #include "llvm/IR/MDBuilder.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/IR/Type.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/DataTypes.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/Endian.h"
43 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/Cloning.h"
46 #include "llvm/Transforms/Utils/Local.h"
47 #include "llvm/Transforms/Utils/ModuleUtils.h"
48 #include <algorithm>
49 #include <string>
50 #include <system_error>
52 using namespace llvm;
54 #define DEBUG_TYPE "asan"
56 static const uint64_t kDefaultShadowScale = 3;
57 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
58 static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
59 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
60 static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
61 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
62 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000;
63 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
64 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
66 static const size_t kMinStackMallocSize = 1 << 6; // 64B
67 static const size_t kMaxStackMallocSize = 1 << 16; // 64K
68 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
69 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
71 static const char *const kAsanModuleCtorName = "asan.module_ctor";
72 static const char *const kAsanModuleDtorName = "asan.module_dtor";
73 static const int kAsanCtorAndDtorPriority = 1;
74 static const char *const kAsanReportErrorTemplate = "__asan_report_";
75 static const char *const kAsanReportLoadN = "__asan_report_load_n";
76 static const char *const kAsanReportStoreN = "__asan_report_store_n";
77 static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
78 static const char *const kAsanUnregisterGlobalsName =
79 "__asan_unregister_globals";
80 static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
81 static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
82 static const char *const kAsanInitName = "__asan_init_v4";
83 static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
84 static const char *const kAsanCovName = "__sanitizer_cov";
85 static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
86 static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
87 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
88 static const int kMaxAsanStackMallocSizeClass = 10;
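// Rough illustration of the size classes named below: __asan_stack_malloc_<i>
// is expected to serve fake stack frames of up to kMinStackMallocSize << i
// bytes, so class kMaxAsanStackMallocSizeClass (10) lines up with
// kMaxStackMallocSize (64 << 10 == 64K).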
89 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
90 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
91 static const char *const kAsanGenPrefix = "__asan_gen_";
92 static const char *const kAsanPoisonStackMemoryName =
93 "__asan_poison_stack_memory";
94 static const char *const kAsanUnpoisonStackMemoryName =
95 "__asan_unpoison_stack_memory";
97 static const char *const kAsanOptionDetectUAR =
98 "__asan_option_detect_stack_use_after_return";
100 #ifndef NDEBUG
101 static const int kAsanStackAfterReturnMagic = 0xf5;
102 #endif
104 // Access sizes are powers of two: 1, 2, 4, 8, 16.
105 static const size_t kNumberOfAccessSizes = 5;
107 // Command-line flags.
109 // This flag may need to be replaced with -f[no-]asan-reads.
110 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
111 cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
112 static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
113 cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
114 static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
115 cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
116 cl::Hidden, cl::init(true));
117 static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
118 cl::desc("use instrumentation with slow path for all accesses"),
119 cl::Hidden, cl::init(false));
120 // This flag limits the number of instructions to be instrumented
121 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
122 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
123 // set it to 10000.
124 static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
125 cl::init(10000),
126 cl::desc("maximal number of instructions to instrument in any given BB"),
127 cl::Hidden);
128 // This flag may need to be replaced with -f[no]asan-stack.
129 static cl::opt<bool> ClStack("asan-stack",
130 cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
131 static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
132 cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
133 // This flag may need to be replaced with -f[no]asan-globals.
134 static cl::opt<bool> ClGlobals("asan-globals",
135 cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
136 static cl::opt<int> ClCoverage("asan-coverage",
137 cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
138 cl::Hidden, cl::init(0));
139 static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
140 cl::desc("Add coverage instrumentation only to the entry block if there "
141 "are more than this number of blocks."),
142 cl::Hidden, cl::init(1500));
143 static cl::opt<bool> ClInitializers("asan-initialization-order",
144 cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
145 static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
146 cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
147 cl::Hidden, cl::init(false));
148 static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
149 cl::desc("Realign stack to the value of this flag (power of two)"),
150 cl::Hidden, cl::init(32));
151 static cl::opt<int> ClInstrumentationWithCallsThreshold(
152 "asan-instrumentation-with-call-threshold",
153 cl::desc("If the function being instrumented contains more than "
154 "this number of memory accesses, use callbacks instead of "
155 "inline checks (-1 means never use callbacks)."),
156 cl::Hidden, cl::init(7000));
157 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
158 "asan-memory-access-callback-prefix",
159 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
160 cl::init("__asan_"));
162 // This is an experimental feature that will allow choosing between
163 // instrumented and non-instrumented code at link-time.
164 // If this option is on, just before instrumenting a function we create its
165 // clone; if the function is not changed by ASan, the clone is deleted.
166 // If we end up with a clone, we put the instrumented function into a section
167 // called "ASAN" and the uninstrumented function into a section called "NOASAN".
168 //
169 // This is still a prototype, we need to figure out a way to keep two copies of
170 // a function so that the linker can easily choose one of them.
171 static cl::opt<bool> ClKeepUninstrumented("asan-keep-uninstrumented-functions",
172 cl::desc("Keep uninstrumented copies of functions"),
173 cl::Hidden, cl::init(false));
175 // These flags allow changing the shadow mapping.
176 // The shadow mapping looks like
177 //    Shadow = (Mem >> scale) + offset
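// As a rough worked example: with the default scale of 3 and the small
// x86-64 offset (0x7FFF8000), the 8-byte granule at address 0x10000 is
// described by the shadow byte at (0x10000 >> 3) + 0x7FFF8000 == 0x7FFFA000.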
178 static cl::opt<int> ClMappingScale("asan-mapping-scale",
179 cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
181 // Optimization flags. Not user visible, used mostly for testing
182 // and benchmarking the tool.
183 static cl::opt<bool> ClOpt("asan-opt",
184 cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
185 static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
186 cl::desc("Instrument the same temp just once"), cl::Hidden,
187 cl::init(true));
188 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
189 cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
191 static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
192 cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
193 cl::Hidden, cl::init(false));
195 // Debug flags.
196 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
197 cl::init(0));
198 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
199 cl::Hidden, cl::init(0));
200 static cl::opt<std::string> ClDebugFunc("asan-debug-func",
201 cl::Hidden, cl::desc("Debug func"));
202 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
203 cl::Hidden, cl::init(-1));
204 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
205 cl::Hidden, cl::init(-1));
207 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
208 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
209 STATISTIC(NumOptimizedAccessesToGlobalArray,
210 "Number of optimized accesses to global arrays");
211 STATISTIC(NumOptimizedAccessesToGlobalVar,
212 "Number of optimized accesses to global vars");
214 namespace {
215 /// Frontend-provided metadata for global variables.
216 class GlobalsMetadata {
217 public:
218 struct Entry {
219 Entry() : SourceLoc(nullptr), IsDynInit(false), IsBlacklisted(false) {}
220 GlobalVariable *SourceLoc;
221 bool IsDynInit;
222 bool IsBlacklisted;
223 };
225 GlobalsMetadata() : inited_(false) {}
227 void init(Module& M) {
228 assert(!inited_);
229 inited_ = true;
230 NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
231 if (!Globals)
232 return;
233 for (auto MDN : Globals->operands()) {
234 // Metadata node contains the global and the fields of "Entry".
235 assert(MDN->getNumOperands() == 4);
236 Value *V = MDN->getOperand(0);
237 // The optimizer may optimize away a global entirely.
238 if (!V)
239 continue;
240 GlobalVariable *GV = cast<GlobalVariable>(V);
241 // We can already have an entry for GV if it was merged with another
242 // global.
243 Entry &E = Entries[GV];
244 if (Value *Loc = MDN->getOperand(1)) {
245 GlobalVariable *GVLoc = cast<GlobalVariable>(Loc);
246 E.SourceLoc = GVLoc;
247 addSourceLocationGlobal(GVLoc);
248 }
249 ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(2));
250 E.IsDynInit |= IsDynInit->isOne();
251 ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(3));
252 E.IsBlacklisted |= IsBlacklisted->isOne();
253 }
254 }
256 /// Returns metadata entry for a given global.
257 Entry get(GlobalVariable *G) const {
258 auto Pos = Entries.find(G);
259 return (Pos != Entries.end()) ? Pos->second : Entry();
260 }
262 /// Check if the global was generated by the instrumentation
263 /// (we don't want to instrument it again in this case).
264 bool isInstrumentationGlobal(GlobalVariable *G) const {
265 return InstrumentationGlobals.count(G);
266 }
268 private:
269 bool inited_;
270 DenseMap<GlobalVariable*, Entry> Entries;
271 // Globals generated by the frontend instrumentation.
272 DenseSet<GlobalVariable*> InstrumentationGlobals;
274 void addSourceLocationGlobal(GlobalVariable *SourceLocGV) {
275 // Source location global is a struct with layout:
276 // {
277 // filename,
278 // i32 line_number,
279 // i32 column_number,
280 // }
281 InstrumentationGlobals.insert(SourceLocGV);
282 ConstantStruct *Contents =
283 cast<ConstantStruct>(SourceLocGV->getInitializer());
284 GlobalVariable *FilenameGV = cast<GlobalVariable>(Contents->getOperand(0));
285 InstrumentationGlobals.insert(FilenameGV);
286 }
287 };
289 /// This struct defines the shadow mapping using the rule:
290 /// shadow = (mem >> Scale) ADD-or-OR Offset.
291 struct ShadowMapping {
292 int Scale;
293 uint64_t Offset;
294 bool OrShadowOffset;
295 };
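// To make the rule above concrete: with Scale == 3 and the default 64-bit
// Offset of 1ULL << 44 (a power of two), the offset can be OR-ed into the
// shifted address; with kSmallX86_64ShadowOffset (0x7FFF8000, not a power of
// two) an ADD is needed instead. getShadowMapping() below picks between the
// two via OrShadowOffset.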
297 static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
298 llvm::Triple TargetTriple(M.getTargetTriple());
299 bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
300 bool IsIOS = TargetTriple.getOS() == llvm::Triple::IOS;
301 bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
302 bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
303 bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
304 TargetTriple.getArch() == llvm::Triple::ppc64le;
305 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
306 bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
307 TargetTriple.getArch() == llvm::Triple::mipsel;
309 ShadowMapping Mapping;
311 if (LongSize == 32) {
312 if (IsAndroid)
313 Mapping.Offset = 0;
314 else if (IsMIPS32)
315 Mapping.Offset = kMIPS32_ShadowOffset32;
316 else if (IsFreeBSD)
317 Mapping.Offset = kFreeBSD_ShadowOffset32;
318 else if (IsIOS)
319 Mapping.Offset = kIOSShadowOffset32;
320 else
321 Mapping.Offset = kDefaultShadowOffset32;
322 } else { // LongSize == 64
323 if (IsPPC64)
324 Mapping.Offset = kPPC64_ShadowOffset64;
325 else if (IsFreeBSD)
326 Mapping.Offset = kFreeBSD_ShadowOffset64;
327 else if (IsLinux && IsX86_64)
328 Mapping.Offset = kSmallX86_64ShadowOffset;
329 else
330 Mapping.Offset = kDefaultShadowOffset64;
331 }
333 Mapping.Scale = kDefaultShadowScale;
334 if (ClMappingScale) {
335 Mapping.Scale = ClMappingScale;
336 }
338 // OR-ing the shadow offset is more efficient (at least on x86) when the
339 // offset is a power of two, but on ppc64 we have to use ADD, since the
340 // shadow offset is not necessarily 1/8-th of the address space.
341 Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));
343 return Mapping;
344 }
346 static size_t RedzoneSizeForScale(int MappingScale) {
347 // Redzone used for stack and globals is at least 32 bytes.
348 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
349 return std::max(32U, 1U << MappingScale);
350 }
352 /// AddressSanitizer: instrument the code in module to find memory bugs.
353 struct AddressSanitizer : public FunctionPass {
354 AddressSanitizer() : FunctionPass(ID) {}
355 const char *getPassName() const override {
356 return "AddressSanitizerFunctionPass";
357 }
358 void instrumentMop(Instruction *I, bool UseCalls);
359 void instrumentPointerComparisonOrSubtraction(Instruction *I);
360 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
361 Value *Addr, uint32_t TypeSize, bool IsWrite,
362 Value *SizeArgument, bool UseCalls);
363 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
364 Value *ShadowValue, uint32_t TypeSize);
365 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
366 bool IsWrite, size_t AccessSizeIndex,
367 Value *SizeArgument);
368 void instrumentMemIntrinsic(MemIntrinsic *MI);
369 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
370 bool runOnFunction(Function &F) override;
371 bool maybeInsertAsanInitAtFunctionEntry(Function &F);
372 bool doInitialization(Module &M) override;
373 static char ID; // Pass identification, replacement for typeid
375 private:
376 void initializeCallbacks(Module &M);
378 bool LooksLikeCodeInBug11395(Instruction *I);
379 bool GlobalIsLinkerInitialized(GlobalVariable *G);
380 bool InjectCoverage(Function &F, const ArrayRef<BasicBlock*> AllBlocks);
381 void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
383 LLVMContext *C;
384 const DataLayout *DL;
385 int LongSize;
386 Type *IntptrTy;
387 ShadowMapping Mapping;
388 Function *AsanCtorFunction;
389 Function *AsanInitFunction;
390 Function *AsanHandleNoReturnFunc;
391 Function *AsanCovFunction;
392 Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
393 // This array is indexed by AccessIsWrite and log2(AccessSize).
394 Function *AsanErrorCallback[2][kNumberOfAccessSizes];
395 Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
396 // This array is indexed by AccessIsWrite.
397 Function *AsanErrorCallbackSized[2],
398 *AsanMemoryAccessCallbackSized[2];
399 Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
400 InlineAsm *EmptyAsm;
401 GlobalsMetadata GlobalsMD;
403 friend struct FunctionStackPoisoner;
404 };
406 class AddressSanitizerModule : public ModulePass {
407 public:
408 AddressSanitizerModule() : ModulePass(ID) {}
409 bool runOnModule(Module &M) override;
410 static char ID; // Pass identification, replacement for typeid
411 const char *getPassName() const override {
412 return "AddressSanitizerModule";
413 }
415 private:
416 void initializeCallbacks(Module &M);
418 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
419 bool ShouldInstrumentGlobal(GlobalVariable *G);
420 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
421 void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
422 size_t MinRedzoneSizeForGlobal() const {
423 return RedzoneSizeForScale(Mapping.Scale);
424 }
426 GlobalsMetadata GlobalsMD;
427 Type *IntptrTy;
428 LLVMContext *C;
429 const DataLayout *DL;
430 ShadowMapping Mapping;
431 Function *AsanPoisonGlobals;
432 Function *AsanUnpoisonGlobals;
433 Function *AsanRegisterGlobals;
434 Function *AsanUnregisterGlobals;
435 Function *AsanCovModuleInit;
436 };
438 // Stack poisoning does not play well with exception handling.
439 // When an exception is thrown, we essentially bypass the code
440 // that unpoisons the stack. This is why the run-time library has
441 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
442 // stack in the interceptor. This, however, does not work inside the
443 // function that actually catches the exception, most likely because the
444 // compiler hoists the load of the shadow value somewhere too high.
445 // This causes ASan to report a non-existent bug on 453.povray.
446 // It looks like an LLVM bug.
447 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
448 Function &F;
449 AddressSanitizer &ASan;
450 DIBuilder DIB;
451 LLVMContext *C;
452 Type *IntptrTy;
453 Type *IntptrPtrTy;
454 ShadowMapping Mapping;
456 SmallVector<AllocaInst*, 16> AllocaVec;
457 SmallVector<Instruction*, 8> RetVec;
458 unsigned StackAlignment;
460 Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
461 *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
462 Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
464 // Stores the location and arguments of a poisoning/unpoisoning call for an alloca.
465 struct AllocaPoisonCall {
466 IntrinsicInst *InsBefore;
467 AllocaInst *AI;
468 uint64_t Size;
469 bool DoPoison;
470 };
471 SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
473 // Maps a Value to the AllocaInst from which it originates.
474 typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
475 AllocaForValueMapTy AllocaForValue;
477 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
478 : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
479 IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
480 Mapping(ASan.Mapping),
481 StackAlignment(1 << Mapping.Scale) {}
483 bool runOnFunction() {
484 if (!ClStack) return false;
485 // Collect alloca, ret, lifetime instructions etc.
486 for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
487 visit(*BB);
489 if (AllocaVec.empty()) return false;
491 initializeCallbacks(*F.getParent());
493 poisonStack();
495 if (ClDebugStack) {
496 DEBUG(dbgs() << F);
497 }
498 return true;
499 }
501 // Finds all static Alloca instructions, puts poisoned red zones around
502 // all of them, and then unpoisons everything back before the function
503 // returns.
504 void poisonStack();
506 // ----------------------- Visitors.
507 /// \brief Collect all Ret instructions.
508 void visitReturnInst(ReturnInst &RI) {
509 RetVec.push_back(&RI);
510 }
512 /// \brief Collect Alloca instructions we want (and can) handle.
513 void visitAllocaInst(AllocaInst &AI) {
514 if (!isInterestingAlloca(AI)) return;
516 StackAlignment = std::max(StackAlignment, AI.getAlignment());
517 AllocaVec.push_back(&AI);
518 }
520 /// \brief Collect lifetime intrinsic calls to check for use-after-scope
521 /// errors.
522 void visitIntrinsicInst(IntrinsicInst &II) {
523 if (!ClCheckLifetime) return;
524 Intrinsic::ID ID = II.getIntrinsicID();
525 if (ID != Intrinsic::lifetime_start &&
526 ID != Intrinsic::lifetime_end)
527 return;
528 // Found lifetime intrinsic, add ASan instrumentation if necessary.
529 ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
530 // If the size argument is -1 (undefined size), don't do anything.
531 if (Size->isMinusOne()) return;
532 // Check that size doesn't saturate uint64_t and can
533 // be stored in IntptrTy.
534 const uint64_t SizeValue = Size->getValue().getLimitedValue();
535 if (SizeValue == ~0ULL ||
536 !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
537 return;
538 // Find alloca instruction that corresponds to llvm.lifetime argument.
539 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
540 if (!AI) return;
541 bool DoPoison = (ID == Intrinsic::lifetime_end);
542 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
543 AllocaPoisonCallVec.push_back(APC);
544 }
546 // ---------------------- Helpers.
547 void initializeCallbacks(Module &M);
549 // Check if we want (and can) handle this alloca.
550 bool isInterestingAlloca(AllocaInst &AI) const {
551 return (!AI.isArrayAllocation() && AI.isStaticAlloca() &&
552 AI.getAllocatedType()->isSized() &&
553 // alloca() may be called with 0 size, ignore it.
554 getAllocaSizeInBytes(&AI) > 0);
555 }
557 uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
558 Type *Ty = AI->getAllocatedType();
559 uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
560 return SizeInBytes;
561 }
562 /// Finds alloca where the value comes from.
563 AllocaInst *findAllocaForValue(Value *V);
564 void poisonRedZones(const ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
565 Value *ShadowBase, bool DoPoison);
566 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
568 void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
569 int Size);
570 };
572 } // namespace
574 char AddressSanitizer::ID = 0;
575 INITIALIZE_PASS(AddressSanitizer, "asan",
576 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
577 false, false)
578 FunctionPass *llvm::createAddressSanitizerFunctionPass() {
579 return new AddressSanitizer();
580 }
582 char AddressSanitizerModule::ID = 0;
583 INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
584 "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
585 "ModulePass", false, false)
586 ModulePass *llvm::createAddressSanitizerModulePass() {
587 return new AddressSanitizerModule();
588 }
590 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
591 size_t Res = countTrailingZeros(TypeSize / 8);
592 assert(Res < kNumberOfAccessSizes);
593 return Res;
594 }
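// For example, TypeSizeToSizeIndex(16) == 1 and TypeSizeToSizeIndex(128) == 4:
// the index is log2 of the access size in bytes for the five supported
// power-of-two sizes (1, 2, 4, 8, and 16 bytes).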
596 // \brief Create a constant for Str so that we can pass it to the run-time lib.
597 static GlobalVariable *createPrivateGlobalForString(
598 Module &M, StringRef Str, bool AllowMerging) {
599 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
600 // We use private linkage for module-local strings. If they can be merged
601 // with another one, we set the unnamed_addr attribute.
602 GlobalVariable *GV =
603 new GlobalVariable(M, StrConst->getType(), true,
604 GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
605 if (AllowMerging)
606 GV->setUnnamedAddr(true);
607 GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
608 return GV;
609 }
611 static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
612 return G->getName().find(kAsanGenPrefix) == 0;
613 }
615 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
616 // Shadow >> scale
617 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
618 if (Mapping.Offset == 0)
619 return Shadow;
620 // (Shadow >> scale) | offset
621 if (Mapping.OrShadowOffset)
622 return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
623 else
624 return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
625 }
627 // Instrument memset/memmove/memcpy
628 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
629 IRBuilder<> IRB(MI);
630 if (isa<MemTransferInst>(MI)) {
631 IRB.CreateCall3(
632 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
633 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
634 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
635 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
636 } else if (isa<MemSetInst>(MI)) {
637 IRB.CreateCall3(
638 AsanMemset,
639 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
640 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
641 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
642 }
643 MI->eraseFromParent();
644 }
646 // If I is an interesting memory access, return the PointerOperand
647 // and set IsWrite/Alignment. Otherwise return nullptr.
648 static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
649 unsigned *Alignment) {
650 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
651 if (!ClInstrumentReads) return nullptr;
652 *IsWrite = false;
653 *Alignment = LI->getAlignment();
654 return LI->getPointerOperand();
655 }
656 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
657 if (!ClInstrumentWrites) return nullptr;
658 *IsWrite = true;
659 *Alignment = SI->getAlignment();
660 return SI->getPointerOperand();
661 }
662 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
663 if (!ClInstrumentAtomics) return nullptr;
664 *IsWrite = true;
665 *Alignment = 0;
666 return RMW->getPointerOperand();
667 }
668 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
669 if (!ClInstrumentAtomics) return nullptr;
670 *IsWrite = true;
671 *Alignment = 0;
672 return XCHG->getPointerOperand();
673 }
674 return nullptr;
675 }
677 static bool isPointerOperand(Value *V) {
678 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
679 }
681 // This is a rough heuristic; it may cause both false positives and
682 // false negatives. The proper implementation requires cooperation with
683 // the frontend.
684 static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
685 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
686 if (!Cmp->isRelational())
687 return false;
688 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
689 if (BO->getOpcode() != Instruction::Sub)
690 return false;
691 } else {
692 return false;
693 }
694 if (!isPointerOperand(I->getOperand(0)) ||
695 !isPointerOperand(I->getOperand(1)))
696 return false;
697 return true;
698 }
700 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
701 // If a global variable does not have dynamic initialization we don't
702 // have to instrument it. However, if a global does not have an initializer
703 // at all, we assume it has a dynamic initializer (in another TU).
704 return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
705 }
707 void
708 AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
709 IRBuilder<> IRB(I);
710 Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
711 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
712 for (int i = 0; i < 2; i++) {
713 if (Param[i]->getType()->isPointerTy())
714 Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
715 }
716 IRB.CreateCall2(F, Param[0], Param[1]);
717 }
719 void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
720 bool IsWrite = false;
721 unsigned Alignment = 0;
722 Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
723 assert(Addr);
724 if (ClOpt && ClOptGlobals) {
725 if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
726 // If initialization order checking is disabled, a simple access to a
727 // dynamically initialized global is always valid.
728 if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
729 NumOptimizedAccessesToGlobalVar++;
730 return;
731 }
732 }
733 ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
734 if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
735 if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
736 if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
737 NumOptimizedAccessesToGlobalArray++;
738 return;
739 }
740 }
741 }
742 }
744 Type *OrigPtrTy = Addr->getType();
745 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
747 assert(OrigTy->isSized());
748 uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
750 assert((TypeSize % 8) == 0);
752 if (IsWrite)
753 NumInstrumentedWrites++;
754 else
755 NumInstrumentedReads++;
757 unsigned Granularity = 1 << Mapping.Scale;
758 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
759 // if the data is properly aligned.
760 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
761 TypeSize == 128) &&
762 (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
763 return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
764 // Instrument an access of unusual size or unusual alignment.
765 // We cannot do this with a single check, so we emit a 1-byte check for the
766 // first and the last bytes. We call __asan_report_*_n(addr, real_size) to
767 // be able to report the actual access size.
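// For instance, a hypothetical 10-byte (80-bit) access at Addr would get a
// 1-byte check at Addr and another at Addr + 9, and on failure the run-time
// would be told the real size (10) via __asan_report_*_n.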
768 IRBuilder<> IRB(I);
769 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
770 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
771 if (UseCalls) {
772 IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
773 } else {
774 Value *LastByte = IRB.CreateIntToPtr(
775 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
776 OrigPtrTy);
777 instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
778 instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
779 }
780 }
782 // Validate the result of Module::getOrInsertFunction called for an interface
783 // function of AddressSanitizer. If the instrumented module defines a function
784 // with the same name, their prototypes must match, otherwise
785 // getOrInsertFunction returns a bitcast.
786 static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
787 if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
788 FuncOrBitcast->dump();
789 report_fatal_error("trying to redefine an AddressSanitizer "
790 "interface function");
791 }
793 Instruction *AddressSanitizer::generateCrashCode(
794 Instruction *InsertBefore, Value *Addr,
795 bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
796 IRBuilder<> IRB(InsertBefore);
797 CallInst *Call = SizeArgument
798 ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
799 : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
801 // We don't do Call->setDoesNotReturn() because the BB already has
802 // UnreachableInst at the end.
803 // This EmptyAsm is required to avoid callback merge.
804 IRB.CreateCall(EmptyAsm);
805 return Call;
806 }
808 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
809 Value *ShadowValue,
810 uint32_t TypeSize) {
811 size_t Granularity = 1 << Mapping.Scale;
812 // Addr & (Granularity - 1)
813 Value *LastAccessedByte = IRB.CreateAnd(
814 AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
815 // (Addr & (Granularity - 1)) + size - 1
816 if (TypeSize / 8 > 1)
817 LastAccessedByte = IRB.CreateAdd(
818 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
819 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
820 LastAccessedByte = IRB.CreateIntCast(
821 LastAccessedByte, ShadowValue->getType(), false);
822 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
823 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
824 }
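// A small worked example of the slow-path predicate above: with 8-byte shadow
// granularity, a 2-byte access at an address with (Addr & 7) == 6 touches
// bytes 6 and 7 of its granule. If the shadow value k is nonzero, only the
// first k bytes of the granule are addressable, so the access is reported
// when 7 >= k.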
826 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
827 Instruction *InsertBefore, Value *Addr,
828 uint32_t TypeSize, bool IsWrite,
829 Value *SizeArgument, bool UseCalls) {
830 IRBuilder<> IRB(InsertBefore);
831 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
832 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
834 if (UseCalls) {
835 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
836 AddrLong);
837 return;
838 }
840 Type *ShadowTy = IntegerType::get(
841 *C, std::max(8U, TypeSize >> Mapping.Scale));
842 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
843 Value *ShadowPtr = memToShadow(AddrLong, IRB);
844 Value *CmpVal = Constant::getNullValue(ShadowTy);
845 Value *ShadowValue = IRB.CreateLoad(
846 IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
848 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
849 size_t Granularity = 1 << Mapping.Scale;
850 TerminatorInst *CrashTerm = nullptr;
852 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
853 TerminatorInst *CheckTerm =
854 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
855 assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
856 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
857 IRB.SetInsertPoint(CheckTerm);
858 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
859 BasicBlock *CrashBlock =
860 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
861 CrashTerm = new UnreachableInst(*C, CrashBlock);
862 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
863 ReplaceInstWithInst(CheckTerm, NewTerm);
864 } else {
865 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
866 }
868 Instruction *Crash = generateCrashCode(
869 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
870 Crash->setDebugLoc(OrigIns->getDebugLoc());
871 }
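// Roughly, the inline check emitted by instrumentAddress() above has this
// shape (casts elided):
//   shadow = *(int8_t *)memToShadow(Addr);
//   if (shadow != 0) {
//     // Extra check, emitted when the access may cover only part of an
//     // 8-byte shadow granule (TypeSize < 8 * Granularity) or when
//     // -asan-always-slow-path is set; otherwise report unconditionally:
//     if ((Addr & (Granularity - 1)) + TypeSize / 8 - 1 >= shadow)
//       __asan_report_...(Addr);   // in a block ending in unreachable
//   }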
873 void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
874 GlobalValue *ModuleName) {
875 // Set up the arguments to our poison/unpoison functions.
876 IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());
878 // Add a call to poison all external globals before the given function starts.
879 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
880 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
882 // Add calls to unpoison all globals before each return instruction.
883 for (auto &BB : GlobalInit.getBasicBlockList())
884 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
885 CallInst::Create(AsanUnpoisonGlobals, "", RI);
886 }
888 void AddressSanitizerModule::createInitializerPoisonCalls(
889 Module &M, GlobalValue *ModuleName) {
890 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
892 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
893 for (Use &OP : CA->operands()) {
894 if (isa<ConstantAggregateZero>(OP))
895 continue;
896 ConstantStruct *CS = cast<ConstantStruct>(OP);
898 // Must have a function or null ptr.
899 // (CS->getOperand(0) is the init priority.)
900 if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
901 if (F->getName() != kAsanModuleCtorName)
902 poisonOneInitializer(*F, ModuleName);
903 }
904 }
905 }
907 bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
908 Type *Ty = cast<PointerType>(G->getType())->getElementType();
909 DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
911 if (GlobalsMD.get(G).IsBlacklisted) return false;
912 if (GlobalsMD.isInstrumentationGlobal(G)) return false;
913 if (!Ty->isSized()) return false;
914 if (!G->hasInitializer()) return false;
915 if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
916 // Touch only those globals that will not be defined in other modules.
917 // Don't handle ODR linkage types and COMDATs since other modules may be built
918 // without ASan.
919 if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
920 G->getLinkage() != GlobalVariable::PrivateLinkage &&
921 G->getLinkage() != GlobalVariable::InternalLinkage)
922 return false;
923 if (G->hasComdat())
924 return false;
925 // Two problems with thread-locals:
926 // - The address of the main thread's copy can't be computed at link-time.
927 // - Need to poison all copies, not just the main thread's one.
928 if (G->isThreadLocal())
929 return false;
930 // For now, just ignore this Global if the alignment is large.
931 if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
933 // Ignore all the globals with the names starting with "\01L_OBJC_".
934 // Many of those are put into the .cstring section. The linker compresses
935 // that section by removing the spare \0s after the string terminator, so
936 // our redzones get broken.
937 if ((G->getName().find("\01L_OBJC_") == 0) ||
938 (G->getName().find("\01l_OBJC_") == 0)) {
939 DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G << "\n");
940 return false;
941 }
943 if (G->hasSection()) {
944 StringRef Section(G->getSection());
945 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
946 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
947 // them.
948 if (Section.startswith("__OBJC,") ||
949 Section.startswith("__DATA, __objc_")) {
950 DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
951 return false;
952 }
953 // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
954 // Constant CFString instances are compiled in the following way:
955 // -- the string buffer is emitted into
956 // __TEXT,__cstring,cstring_literals
957 // -- the constant NSConstantString structure referencing that buffer
958 // is placed into __DATA,__cfstring
959 // Therefore there's no point in placing redzones into __DATA,__cfstring.
960 // Moreover, it causes the linker to crash on OS X 10.7
961 if (Section.startswith("__DATA,__cfstring")) {
962 DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
963 return false;
964 }
965 // The linker merges the contents of cstring_literals and removes the
966 // trailing zeroes.
967 if (Section.startswith("__TEXT,__cstring,cstring_literals")) {
968 DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
969 return false;
970 }
972 // Callbacks put into the CRT initializer/terminator sections
973 // should not be instrumented.
974 // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
975 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
976 if (Section.startswith(".CRT")) {
977 DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
978 return false;
979 }
981 // Globals from llvm.metadata aren't emitted, do not instrument them.
982 if (Section == "llvm.metadata") return false;
983 }
985 return true;
986 }
988 void AddressSanitizerModule::initializeCallbacks(Module &M) {
989 IRBuilder<> IRB(*C);
990 // Declare our poisoning and unpoisoning functions.
991 AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
992 kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL));
993 AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
994 AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
995 kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
996 AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
997 // Declare functions that register/unregister globals.
998 AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
999 kAsanRegisterGlobalsName, IRB.getVoidTy(),
1000 IntptrTy, IntptrTy, NULL));
1001 AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
1002 AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
1003 kAsanUnregisterGlobalsName,
1004 IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1005 AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
1006 AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
1007 kAsanCovModuleInitName,
1008 IRB.getVoidTy(), IntptrTy, NULL));
1009 AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
1010 }
1012 // This function replaces all instrumentable global variables with new
1013 // variables that have trailing redzones, registers them with the run-time
1014 // from the module constructor, and unregisters them from a new module dtor.
1015 bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
1016 GlobalsMD.init(M);
1018 SmallVector<GlobalVariable *, 16> GlobalsToChange;
1020 for (auto &G : M.globals()) {
1021 if (ShouldInstrumentGlobal(&G))
1022 GlobalsToChange.push_back(&G);
1023 }
1025 size_t n = GlobalsToChange.size();
1026 if (n == 0) return false;
1028 // A global is described by a structure
1029 // size_t beg;
1030 // size_t size;
1031 // size_t size_with_redzone;
1032 // const char *name;
1033 // const char *module_name;
1034 // size_t has_dynamic_init;
1035 // void *source_location;
1036 // We initialize an array of such structures and pass it to a run-time call.
1037 StructType *GlobalStructTy =
1038 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
1039 IntptrTy, IntptrTy, NULL);
1040 SmallVector<Constant *, 16> Initializers(n);
1042 bool HasDynamicallyInitializedGlobals = false;
1044 // We shouldn't merge same module names, as this string serves as a unique
1045 // module ID at runtime.
1046 GlobalVariable *ModuleName = createPrivateGlobalForString(
1047 M, M.getModuleIdentifier(), /*AllowMerging*/false);
1049 for (size_t i = 0; i < n; i++) {
1050 static const uint64_t kMaxGlobalRedzone = 1 << 18;
1051 GlobalVariable *G = GlobalsToChange[i];
1052 PointerType *PtrTy = cast<PointerType>(G->getType());
1053 Type *Ty = PtrTy->getElementType();
1054 uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
1055 uint64_t MinRZ = MinRedzoneSizeForGlobal();
1056 // Choose RZ so that MinRZ <= RZ <= kMaxGlobalRedzone,
1057 // trying to make RZ roughly 1/4 of SizeInBytes.
1058 uint64_t RZ = std::max(MinRZ,
1059 std::min(kMaxGlobalRedzone,
1060 (SizeInBytes / MinRZ / 4) * MinRZ));
1061 uint64_t RightRedzoneSize = RZ;
1062 // Round up to MinRZ
1063 if (SizeInBytes % MinRZ)
1064 RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
1065 assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
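// A hypothetical example of the sizing above: a 17-byte global with
// MinRZ == 32 gets RZ == 32, which is then rounded up by 32 - 17 == 15
// to a 47-byte right redzone, so the padded global occupies 64 bytes,
// a multiple of MinRZ.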
1066 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
1068 StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
1069 Constant *NewInitializer = ConstantStruct::get(
1070 NewTy, G->getInitializer(),
1071 Constant::getNullValue(RightRedZoneTy), NULL);
1073 GlobalVariable *Name =
1074 createPrivateGlobalForString(M, G->getName(), /*AllowMerging*/true);
1076 // Create a new global variable with enough space for a redzone.
1077 GlobalValue::LinkageTypes Linkage = G->getLinkage();
1078 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
1079 Linkage = GlobalValue::InternalLinkage;
1080 GlobalVariable *NewGlobal = new GlobalVariable(
1081 M, NewTy, G->isConstant(), Linkage,
1082 NewInitializer, "", G, G->getThreadLocalMode());
1083 NewGlobal->copyAttributesFrom(G);
1084 NewGlobal->setAlignment(MinRZ);
1086 Value *Indices2[2];
1087 Indices2[0] = IRB.getInt32(0);
1088 Indices2[1] = IRB.getInt32(0);
1090 G->replaceAllUsesWith(
1091 ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
1092 NewGlobal->takeName(G);
1093 G->eraseFromParent();
1095 auto MD = GlobalsMD.get(G);
1097 Initializers[i] = ConstantStruct::get(
1098 GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
1099 ConstantInt::get(IntptrTy, SizeInBytes),
1100 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
1101 ConstantExpr::getPointerCast(Name, IntptrTy),
1102 ConstantExpr::getPointerCast(ModuleName, IntptrTy),
1103 ConstantInt::get(IntptrTy, MD.IsDynInit),
1104 MD.SourceLoc ? ConstantExpr::getPointerCast(MD.SourceLoc, IntptrTy)
1105 : ConstantInt::get(IntptrTy, 0),
1106 NULL);
1108 if (ClInitializers && MD.IsDynInit)
1109 HasDynamicallyInitializedGlobals = true;
1111 DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
1112 }
1114 ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
1115 GlobalVariable *AllGlobals = new GlobalVariable(
1116 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
1117 ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
1119 // Create calls for poisoning before initializers run and unpoisoning after.
1120 if (HasDynamicallyInitializedGlobals)
1121 createInitializerPoisonCalls(M, ModuleName);
1122 IRB.CreateCall2(AsanRegisterGlobals,
1123 IRB.CreatePointerCast(AllGlobals, IntptrTy),
1124 ConstantInt::get(IntptrTy, n));
1126 // We also need to unregister globals at the end, e.g., when a shared library
1127 // is unloaded.
1128 Function *AsanDtorFunction = Function::Create(
1129 FunctionType::get(Type::getVoidTy(*C), false),
1130 GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
1131 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
1132 IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
1133 IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
1134 IRB.CreatePointerCast(AllGlobals, IntptrTy),
1135 ConstantInt::get(IntptrTy, n));
1136 appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
1138 DEBUG(dbgs() << M);
1139 return true;
1140 }
1142 bool AddressSanitizerModule::runOnModule(Module &M) {
1143 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1144 if (!DLP)
1145 return false;
1146 DL = &DLP->getDataLayout();
1147 C = &(M.getContext());
1148 int LongSize = DL->getPointerSizeInBits();
1149 IntptrTy = Type::getIntNTy(*C, LongSize);
1150 Mapping = getShadowMapping(M, LongSize);
1151 initializeCallbacks(M);
1153 bool Changed = false;
1155 Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
1156 assert(CtorFunc);
1157 IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
1159 if (ClCoverage > 0) {
1160 Function *CovFunc = M.getFunction(kAsanCovName);
1161 int nCov = CovFunc ? CovFunc->getNumUses() : 0;
1162 IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
1163 Changed = true;
1164 }
1166 if (ClGlobals)
1167 Changed |= InstrumentGlobals(IRB, M);
1169 return Changed;
1170 }
1172 void AddressSanitizer::initializeCallbacks(Module &M) {
1173 IRBuilder<> IRB(*C);
1174 // Create __asan_report* callbacks.
1175 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
1176 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
1177 AccessSizeIndex++) {
1178 // IsWrite and TypeSize are encoded in the function name.
1179 std::string Suffix =
1180 (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
1181 AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
1182 checkInterfaceFunction(
1183 M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
1184 IRB.getVoidTy(), IntptrTy, NULL));
1185 AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
1186 checkInterfaceFunction(
1187 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
1188 IRB.getVoidTy(), IntptrTy, NULL));
1189 }
1190 }
1191 AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
1192 kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1193 AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
1194 kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1196 AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
1197 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
1198 IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1199 AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
1200 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
1201 IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1203 AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
1204 ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
1205 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
1206 AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
1207 ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
1208 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
1209 AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
1210 ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
1211 IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));
1213 AsanHandleNoReturnFunc = checkInterfaceFunction(
1214 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
1215 AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
1216 kAsanCovName, IRB.getVoidTy(), NULL));
1217 AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
1218 kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1219 AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
1220 kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
1221 // We insert an empty inline asm after __asan_report* to avoid callback merge.
1222 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
1223 StringRef(""), StringRef(""),
1224 /*hasSideEffects=*/true);
1225 }
1227 // virtual
1228 bool AddressSanitizer::doInitialization(Module &M) {
1229 // Initialize the private fields. No one has accessed them before.
1230 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1231 if (!DLP)
1232 report_fatal_error("data layout missing");
1233 DL = &DLP->getDataLayout();
1235 GlobalsMD.init(M);
1237 C = &(M.getContext());
1238 LongSize = DL->getPointerSizeInBits();
1239 IntptrTy = Type::getIntNTy(*C, LongSize);
1241 AsanCtorFunction = Function::Create(
1242 FunctionType::get(Type::getVoidTy(*C), false),
1243 GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
1244 BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
1245 // call __asan_init in the module ctor.
1246 IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
1247 AsanInitFunction = checkInterfaceFunction(
1248 M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
1249 AsanInitFunction->setLinkage(Function::ExternalLinkage);
1250 IRB.CreateCall(AsanInitFunction);
1252 Mapping = getShadowMapping(M, LongSize);
1254 appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
1255 return true;
1256 }
1258 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
1259 // For each NSObject descendant having a +load method, this method is invoked
1260 // by the ObjC runtime before any of the static constructors is called.
1261 // Therefore we need to instrument such methods with a call to __asan_init
1262 // at the beginning in order to initialize our runtime before any access to
1263 // the shadow memory.
1264 // We cannot just ignore these methods, because they may call other
1265 // instrumented functions.
1266 if (F.getName().find(" load]") != std::string::npos) {
1267 IRBuilder<> IRB(F.begin()->begin());
1268 IRB.CreateCall(AsanInitFunction);
1269 return true;
1270 }
1271 return false;
1272 }
1274 void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
1275 BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
1276 // Skip static allocas at the top of the entry block so they don't become
1277 // dynamic when we split the block. If we used our optimized stack layout,
1278 // then there will only be one alloca and it will come first.
1279 for (; IP != BE; ++IP) {
1280 AllocaInst *AI = dyn_cast<AllocaInst>(IP);
1281 if (!AI || !AI->isStaticAlloca())
1282 break;
1283 }
1285 DebugLoc EntryLoc = IP->getDebugLoc().getFnDebugLoc(*C);
1286 IRBuilder<> IRB(IP);
1287 IRB.SetCurrentDebugLocation(EntryLoc);
1288 Type *Int8Ty = IRB.getInt8Ty();
1289 GlobalVariable *Guard = new GlobalVariable(
1290 *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
1291 Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
1292 LoadInst *Load = IRB.CreateLoad(Guard);
1293 Load->setAtomic(Monotonic);
1294 Load->setAlignment(1);
1295 Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
1296 Instruction *Ins = SplitBlockAndInsertIfThen(
1297 Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
1298 IRB.SetInsertPoint(Ins);
1299 IRB.SetCurrentDebugLocation(EntryLoc);
1300 // We call __sanitizer_cov() with no arguments and rely on GET_CALLER_PC
1301 // in the run-time to identify this coverage point.
1302 IRB.CreateCall(AsanCovFunction);
1303 StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
1304 Store->setAtomic(Monotonic);
1305 Store->setAlignment(1);
1306 }
1308 // Poor man's coverage that works with ASan.
1309 // We create a private Guard byte per instrumented block and inject
1310 // this code into the entry block (-asan-coverage=1)
1311 // or all blocks (-asan-coverage=2):
1312 // if (!*Guard) {
1313 //    __sanitizer_cov();
1314 //    *Guard = 1;
1315 // }
1316 // The accesses to Guard are atomic. The rest of the logic is
1317 // in __sanitizer_cov (it's fine to call it more than once).
1318 //
1319 // This coverage implementation provides very limited data:
1320 // it only tells if a given function (block) was ever executed.
1321 // No counters, no per-edge data.
1322 // But for many use cases this is what we need and the added slowdown
1323 // is negligible. This simple implementation will probably be obsoleted
1324 // by the upcoming Clang-based coverage implementation.
1325 // By having it here and now we hope to
1326 // a) get the functionality to users earlier and
1327 // b) collect usage statistics to help improve Clang coverage design.
1328 bool AddressSanitizer::InjectCoverage(Function &F,
1329 const ArrayRef<BasicBlock *> AllBlocks) {
1330 if (!ClCoverage) return false;
1332 if (ClCoverage == 1 ||
1333 (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
1334 InjectCoverageAtBlock(F, F.getEntryBlock());
1335 } else {
1336 for (auto BB : AllBlocks)
1337 InjectCoverageAtBlock(F, *BB);
1338 }
1339 return true;
1340 }
1342 bool AddressSanitizer::runOnFunction(Function &F) {
1343 if (&F == AsanCtorFunction) return false;
1344 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
1345 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
1346 initializeCallbacks(*F.getParent());
1348 // If needed, insert __asan_init before checking for SanitizeAddress attr.
1349 maybeInsertAsanInitAtFunctionEntry(F);
1351 if (!F.hasFnAttribute(Attribute::SanitizeAddress))
1352 return false;
1354 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
1355 return false;
1357 // We want to instrument every address only once per basic block (unless there
1358 // are calls between uses).
1359 SmallSet<Value*, 16> TempsToInstrument;
1360 SmallVector<Instruction*, 16> ToInstrument;
1361 SmallVector<Instruction*, 8> NoReturnCalls;
1362 SmallVector<BasicBlock*, 16> AllBlocks;
1363 SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
1364 int NumAllocas = 0;
1365 bool IsWrite;
1366 unsigned Alignment;
1368 // Fill the set of memory operations to instrument.
1369 for (auto &BB : F) {
1370 AllBlocks.push_back(&BB);
1371 TempsToInstrument.clear();
1372 int NumInsnsPerBB = 0;
1373 for (auto &Inst : BB) {
1374 if (LooksLikeCodeInBug11395(&Inst)) return false;
1375 if (Value *Addr =
1376 isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
1377 if (ClOpt && ClOptSameTemp) {
1378 if (!TempsToInstrument.insert(Addr))
1379 continue; // We've seen this temp in the current BB.
1380 }
1381 } else if (ClInvalidPointerPairs &&
1382 isInterestingPointerComparisonOrSubtraction(&Inst)) {
1383 PointerComparisonsOrSubtracts.push_back(&Inst);
1384 continue;
1385 } else if (isa<MemIntrinsic>(Inst)) {
1386 // ok, take it.
1387 } else {
1388 if (isa<AllocaInst>(Inst))
1389 NumAllocas++;
1390 CallSite CS(&Inst);
1391 if (CS) {
1392 // A call inside BB.
1393 TempsToInstrument.clear();
1394 if (CS.doesNotReturn())
1395 NoReturnCalls.push_back(CS.getInstruction());
1396 }
1397 continue;
1398 }
1399 ToInstrument.push_back(&Inst);
1400 NumInsnsPerBB++;
1401 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
1402 break;
1403 }
1404 }
  Function *UninstrumentedDuplicate = nullptr;
  bool LikelyToInstrument =
      !NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
  if (ClKeepUninstrumented && LikelyToInstrument) {
    ValueToValueMapTy VMap;
    UninstrumentedDuplicate = CloneFunction(&F, VMap, false);
    UninstrumentedDuplicate->removeFnAttr(Attribute::SanitizeAddress);
    UninstrumentedDuplicate->setName("NOASAN_" + F.getName());
    F.getParent()->getFunctionList().push_back(UninstrumentedDuplicate);
  }
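
  // If there are too many interesting accesses, check them with out-of-line
  // runtime calls rather than with inline shadow-memory checks, to keep the
  // generated code size reasonable.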
  bool UseCalls = false;
  if (ClInstrumentationWithCallsThreshold >= 0 &&
      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
    UseCalls = true;

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
        instrumentMop(Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc.).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc);
  }
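
  // Flag the pointer comparisons/subtractions collected above (enabled with
  // ClInvalidPointerPairs).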
  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();

  if (InjectCoverage(F, AllBlocks))
    res = true;

  DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");

  if (ClKeepUninstrumented) {
    if (!res) {
      // No instrumentation is done, no need for the duplicate.
      if (UninstrumentedDuplicate)
        UninstrumentedDuplicate->eraseFromParent();
    } else {
      // The function was instrumented. We must have the duplicate.
      assert(UninstrumentedDuplicate);
      UninstrumentedDuplicate->setSection("NOASAN");
      assert(!F.hasSection());
      F.setSection("ASAN");
    }
  }

  return res;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once the bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}
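
// Declare the runtime entry points used for fake-stack allocation
// (__asan_stack_malloc_<class> / __asan_stack_free_<class>, one pair per
// size class) and for (un)poisoning ranges of stack memory.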
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkInterfaceFunction(
        M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
                              IntptrTy, IntptrTy, NULL));
    AsanStackFreeFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
        kAsanStackFreeNameTemplate + Suffix, IRB.getVoidTy(), IntptrTy,
        IntptrTy, IntptrTy, NULL));
  }
  AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
}

void
FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
                                      IRBuilder<> &IRB, Value *ShadowBase,
                                      bool DoPoison) {
  size_t n = ShadowBytes.size();
  size_t i = 0;
  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
  // using 64-bit stores (if we are on 64-bit arch), then poison the rest
  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
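  // For example, with n == 13 on a little-endian 64-bit target this emits one
  // 8-byte store (shadow bytes 0..7), one 4-byte store (bytes 8..11) and one
  // 1-byte store (byte 12); stores whose value would be all zeros are skipped.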
  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
      uint64_t Val = 0;
      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
        if (ASan.DL->isLittleEndian())
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
        else
          Val = (Val << 8) | ShadowBytes[i + j];
      }
      if (!Val) continue;
      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
      IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
    }
  }
}

// Fake stack allocator (asan_fake_stack.h) has 11 size classes
// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
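// (e.g. a frame of up to 64 bytes maps to class 0, 65..128 bytes to class 1,
// and so on).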
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0; ; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize)
      return i;
  llvm_unreachable("impossible LocalStackSize");
}

// Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
// We cannot use the memset intrinsic because it may end up calling the actual
// memset. Size is a multiple of 8.
// Currently this generates 8-byte stores on x86_64; it may be better to
// generate wider stores.
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
    IRBuilder<> &IRB, Value *ShadowBase, int Size) {
  assert(!(Size % 8));
  assert(kAsanStackAfterReturnMagic == 0xf5);
  for (int i = 0; i < Size; i += 8) {
    Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
  }
}
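
// Find a debug location for code inserted at function entry: use the location
// of the first non-alloca instruction in the entry block, since the allocas
// themselves typically carry no useful location.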
static DebugLoc getFunctionEntryDebugLocation(Function &F) {
  for (const auto &Inst : F.getEntryBlock())
    if (!isa<AllocaInst>(Inst))
      return Inst.getDebugLoc();
  return DebugLoc();
}

void FunctionStackPoisoner::poisonStack() {
  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);

  assert(AllocaVec.size() > 0);
  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = { AI->getName().data(),
                                       getAllocaSizeInBytes(AI),
                                       AI->getAlignment(), AI, 0};
    SVD.push_back(D);
  }
  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t MinHeaderSize = ASan.LongSize / 2;
  ASanStackFrameLayout L;
  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
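  // The fake stack (__asan_stack_malloc_N) is used only when use-after-return
  // detection is enabled and the frame fits into the largest size class.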
  bool DoStackMalloc =
      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;

  Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
  AllocaInst *MyAlloca =
      new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
  MyAlloca->setDebugLoc(EntryDebugLocation);
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  MyAlloca->setAlignment(FrameAlignment);
  assert(MyAlloca->isStaticAlloca());
  Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
  Value *LocalStackBase = OrigStackBase;

  if (DoStackMalloc) {
    // LocalStackBase = OrigStackBase
    // if (__asan_option_detect_stack_use_after_return)
    //   LocalStackBase = __asan_stack_malloc_N(LocalStackBase, OrigStackBase);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUAR, IRB.getInt32Ty());
    Value *Cmp = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
                                  Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
    BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = IRBIf.CreateCall2(
        AsanStackMallocFunc[StackMallocIdx],
        ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
    BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
    Phi->addIncoming(OrigStackBase, CmpBlock);
    Phi->addIncoming(LocalStackBase, SetBlock);
    LocalStackBase = Phi;
  }

  // Insert poison calls for lifetime intrinsics for alloca.
  bool HavePoisonedAllocas = false;
  for (const auto &APC : AllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    HavePoisonedAllocas |= APC.DoPoison;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
                                   /*AllowMerging*/true);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
                                             IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
                                                     2 * ASan.LongSize/8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);

  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if LocalStackBase != OrigStackBase:
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(LocalStackBase) = 0
      //     else
      //         __asan_stack_free_N(LocalStackBase, OrigStackBase)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
      TerminatorInst *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                           ClassSize >> Mapping.Scale);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            LocalStackBase,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
                              ConstantInt::get(IntptrTy, LocalStackSize),
                              OrigStackBase);
      }

      IRBuilder<> IRBElse(ElseTerm);
      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
    } else if (HavePoisonedAllocas) {
      // If we poisoned some allocas in llvm.lifetime analysis,
      // unpoison the whole stack frame now.
      assert(LocalStackBase == OrigStackBase);
      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
    } else {
      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec)
    AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
                           : AsanUnpoisonStackMemoryFunc,
                  AddrArg, SizeArg);
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
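
// Walk through chains of casts and PHI nodes to find the single alloca that a
// value ultimately refers to. Results (including the "unknown" answer nullptr)
// are memoized in AllocaForValue; nullptr is returned when the value may refer
// to more than one alloca, or to none that we are interested in.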
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    // We're interested only in allocas we can handle.
    return isInterestingAlloca(*AI) ? AI : nullptr;
  // See if we've already calculated (or started to calculate) alloca for a
  // given value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end())
    return I->second;
  // Store a nullptr placeholder while we're calculating the alloca for value V
  // to avoid infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *IncValue = PN->getIncomingValue(i);
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  }
  if (Res)
    AllocaForValue[V] = Res;
  return Res;
}