1 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
2 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
4 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5 target triple = "x86_64-unknown-linux-gnu"
7 ; Check the presence of __msan_init
8 ; CHECK: @llvm.global_ctors {{.*}} @__msan_init
10 ; Check the presence and the linkage type of __msan_track_origins and
11 ; other interface symbols.
12 ; CHECK-NOT: @__msan_track_origins
13 ; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
14 ; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
15 ; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
16 ; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
17 ; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
18 ; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
19 ; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
20 ; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
21 ; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32
24 ; Check instrumentation of stores
; The shadow of %x must reach shadow memory before/around the application
; store; with origin tracking, the origin store is guarded by a branch that
; tests whether the shadow is nonzero (see the icmp/br directives below).
26 define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
27 entry:
28 store i32 %x, i32* %p, align 4
29 ret void
30 }
32 ; CHECK: @Store
33 ; CHECK: load {{.*}} @__msan_param_tls
34 ; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
35 ; CHECK: store
36 ; CHECK-ORIGINS: icmp
37 ; CHECK-ORIGINS: br i1
38 ; CHECK-ORIGINS: <label>
39 ; CHECK-ORIGINS: store
40 ; CHECK-ORIGINS: br label
41 ; CHECK-ORIGINS: <label>
42 ; CHECK: store
43 ; CHECK: ret void
46 ; Check instrumentation of aligned stores
47 ; Shadow store has the same alignment as the original store; origin store
48 ; does not specify explicit alignment.
50 define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
51 entry:
52 store i32 %x, i32* %p, align 32
53 ret void
54 }
56 ; CHECK: @AlignedStore
57 ; CHECK: load {{.*}} @__msan_param_tls
58 ; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
59 ; CHECK: store {{.*}} align 32
60 ; CHECK-ORIGINS: icmp
61 ; CHECK-ORIGINS: br i1
62 ; CHECK-ORIGINS: <label>
63 ; CHECK-ORIGINS: store {{.*}} align 32
64 ; CHECK-ORIGINS: br label
65 ; CHECK-ORIGINS: <label>
66 ; CHECK: store {{.*}} align 32
67 ; CHECK: ret void
; Using a loaded value in a branch condition must trigger a shadow test that
; calls __msan_warning_noreturn when the shadow is poisoned; the shadow load
; accompanies the application load.
70 ; load followed by cmp: check that we load the shadow and call __msan_warning.
71 define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
72 entry:
73 %0 = load i32* %a, align 4
74 %tobool = icmp eq i32 %0, 0
75 br i1 %tobool, label %if.end, label %if.then
77 if.then: ; preds = %entry
78 tail call void (...)* @foo() nounwind
79 br label %if.end
81 if.end: ; preds = %entry, %if.then
82 ret void
83 }
85 declare void @foo(...)
87 ; CHECK: @LoadAndCmp
88 ; CHECK: = load
89 ; CHECK: = load
90 ; CHECK: call void @__msan_warning_noreturn()
91 ; CHECK-NEXT: call void asm sideeffect
92 ; CHECK-NEXT: unreachable
93 ; CHECK: ret void
; A fully-initialized constant return value gets a zero shadow written to the
; return-value TLS slot.
95 ; Check that we store the shadow for the retval.
96 define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
97 entry:
98 ret i32 123
99 }
101 ; CHECK: @ReturnInt
102 ; CHECK: store i32 0,{{.*}}__msan_retval_tls
103 ; CHECK: ret i32
; The caller reads the callee's return-value shadow back out of the same
; TLS slot after the call.
105 ; Check that we get the shadow for the retval.
106 define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
107 entry:
108 %call = tail call i32 @ReturnInt() nounwind
109 store i32 %call, i32* %a, align 4
110 ret void
111 }
113 ; CHECK: @CopyRetVal
114 ; CHECK: load{{.*}}__msan_retval_tls
115 ; CHECK: store
116 ; CHECK: store
117 ; CHECK: ret void
; A phi over application values must get a parallel phi over shadow values
; (and origins, in origins mode) - hence the two consecutive phi directives.
120 ; Check that we generate PHIs for shadow.
121 define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
122 entry:
123 %tobool = icmp eq i32* %b, null
124 br i1 %tobool, label %if.else, label %if.then
126 if.then: ; preds = %entry
127 %0 = load i32* %b, align 4
128 br label %if.end
130 if.else: ; preds = %entry
131 %1 = load i32* %c, align 4
132 br label %if.end
134 if.end: ; preds = %if.else, %if.then
135 %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
136 store i32 %t.0, i32* %a, align 4
137 ret void
138 }
140 ; CHECK: @FuncWithPhi
141 ; CHECK: = phi
142 ; CHECK-NEXT: = phi
143 ; CHECK: store
144 ; CHECK: store
145 ; CHECK: ret void
; Shift by a constant amount: the shadow is shifted by the same constant.
147 ; Compute shadow for "x << 10"
148 define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
149 entry:
150 %0 = load i32* %x, align 4
151 %1 = shl i32 %0, 10
152 store i32 %1, i32* %x, align 4
153 ret void
154 }
156 ; CHECK: @ShlConst
157 ; CHECK: = load
158 ; CHECK: = load
159 ; CHECK: shl
160 ; CHECK: shl
161 ; CHECK: store
162 ; CHECK: store
163 ; CHECK: ret void
; Shift by a non-constant amount: any poison in the shift amount poisons the
; whole result, expressed via a sign-extended i1.
165 ; Compute shadow for "10 << x": it should have 'sext i1'.
166 define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
167 entry:
168 %0 = load i32* %x, align 4
169 %1 = shl i32 10, %0
170 store i32 %1, i32* %x, align 4
171 ret void
172 }
174 ; CHECK: @ShlNonConst
175 ; CHECK: = load
176 ; CHECK: = load
177 ; CHECK: = sext i1
178 ; CHECK: store
179 ; CHECK: store
180 ; CHECK: ret void
; sext on the application value is mirrored by a sext on its shadow.
182 ; SExt
183 define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
184 entry:
185 %0 = load i16* %b, align 2
186 %1 = sext i16 %0 to i32
187 store i32 %1, i32* %a, align 4
188 ret void
189 }
191 ; CHECK: @SExt
192 ; CHECK: = load
193 ; CHECK: = load
194 ; CHECK: = sext
195 ; CHECK: = sext
196 ; CHECK: store
197 ; CHECK: store
198 ; CHECK: ret void
; Memory intrinsics are replaced with calls into the MSan runtime
; (__msan_memset / __msan_memcpy / __msan_memmove), which update shadow and
; origin alongside the data.
201 ; memset
202 define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
203 entry:
204 call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
205 ret void
206 }
208 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
210 ; CHECK: @MemSet
211 ; CHECK: call i8* @__msan_memset
212 ; CHECK: ret void
215 ; memcpy
216 define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
217 entry:
218 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
219 ret void
220 }
222 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
224 ; CHECK: @MemCpy
225 ; CHECK: call i8* @__msan_memcpy
226 ; CHECK: ret void
229 ; memmove is lowered to a call
230 define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
231 entry:
232 call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
233 ret void
234 }
236 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
238 ; CHECK: @MemMove
239 ; CHECK: call i8* @__msan_memmove
240 ; CHECK: ret void
; select shadow = select of the operand shadows, OR'ed with the contribution
; of a possibly-poisoned condition; origins are chosen with nested selects.
243 ; Check that we propagate shadow for "select"
245 define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
246 entry:
247 %cond = select i1 %c, i32 %a, i32 %b
248 ret i32 %cond
249 }
251 ; CHECK: @Select
252 ; CHECK: select i1
253 ; CHECK-DAG: or i32
254 ; CHECK-DAG: xor i32
255 ; CHECK: or i32
256 ; CHECK-DAG: select i1
257 ; CHECK-ORIGINS-DAG: select
258 ; CHECK-ORIGINS-DAG: select
259 ; CHECK-DAG: select i1
260 ; CHECK: store i32{{.*}}@__msan_retval_tls
261 ; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
262 ; CHECK: ret i32
265 ; Check that we propagate origin for "select" with vector condition.
266 ; Select condition is flattened to i1, which is then used to select one of the
267 ; argument origins.
269 define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
270 entry:
271 %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
272 ret <8 x i16> %cond
273 }
275 ; CHECK: @SelectVector
276 ; CHECK: select <8 x i1>
277 ; CHECK-DAG: or <8 x i16>
278 ; CHECK-DAG: xor <8 x i16>
279 ; CHECK: or <8 x i16>
280 ; CHECK-DAG: select <8 x i1>
281 ; CHECK-ORIGINS-DAG: select
282 ; CHECK-ORIGINS-DAG: select
283 ; CHECK-DAG: select <8 x i1>
284 ; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
285 ; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
286 ; CHECK: ret <8 x i16>
289 ; Check that we propagate origin for "select" with scalar condition and vector
290 ; arguments. Select condition shadow is sign-extended to the vector type and
291 ; mixed into the result shadow.
293 define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
294 entry:
295 %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
296 ret <8 x i16> %cond
297 }
299 ; CHECK: @SelectVector2
300 ; CHECK: select i1
301 ; CHECK-DAG: or <8 x i16>
302 ; CHECK-DAG: xor <8 x i16>
303 ; CHECK: or <8 x i16>
304 ; CHECK-DAG: select i1
305 ; CHECK-ORIGINS-DAG: select i1
306 ; CHECK-ORIGINS-DAG: select i1
307 ; CHECK-DAG: select i1
308 ; CHECK: ret <8 x i16>
; Aggregate select: a poisoned condition yields an all-ones ({ i64 -1, i64 -1 })
; shadow for the whole struct, since bitwise mixing is not possible.
311 define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
312 entry:
313 %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
314 ret { i64, i64 } %c
315 }
317 ; CHECK: @SelectStruct
318 ; CHECK: select i1 {{.*}}, { i64, i64 }
319 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
320 ; CHECK-ORIGINS: select i1
321 ; CHECK-ORIGINS: select i1
322 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
323 ; CHECK: ret { i64, i64 }
; Same, with a struct whose shadow type ({ i64, i64 }) differs from the
; application type ({ i64*, double }).
326 define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
327 entry:
328 %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
329 ret { i64*, double } %c
330 }
332 ; CHECK: @SelectStruct2
333 ; CHECK: select i1 {{.*}}, { i64, i64 }
334 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
335 ; CHECK-ORIGINS: select i1
336 ; CHECK-ORIGINS: select i1
337 ; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
338 ; CHECK: ret { i64*, double }
; inttoptr of a same-sized integer passes the shadow through unchanged.
341 define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
342 entry:
343 %0 = inttoptr i64 %x to i8*
344 ret i8* %0
345 }
347 ; CHECK: @IntToPtr
348 ; CHECK: load i64*{{.*}}__msan_param_tls
349 ; CHECK-ORIGINS-NEXT: load i32*{{.*}}__msan_param_origin_tls
350 ; CHECK-NEXT: inttoptr
351 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
352 ; CHECK: ret i8*
; inttoptr of a narrower integer: the i16 shadow is zero-extended to pointer
; width before being stored to the retval TLS slot.
355 define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
356 entry:
357 %0 = inttoptr i16 %x to i8*
358 ret i8* %0
359 }
361 ; CHECK: @IntToPtr_ZExt
362 ; CHECK: load i16*{{.*}}__msan_param_tls
363 ; CHECK: zext
364 ; CHECK-NEXT: inttoptr
365 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
366 ; CHECK: ret i8*
369 ; Check that we insert exactly one check on udiv
370 ; (2nd arg shadow is checked, 1st arg shadow is propagated)
372 define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
373 entry:
374 %div = udiv i32 %a, %b
375 ret i32 %div
376 }
378 ; CHECK: @Div
379 ; CHECK: icmp
380 ; CHECK: call void @__msan_warning
381 ; CHECK-NOT: icmp
382 ; CHECK: udiv
383 ; CHECK-NOT: icmp
384 ; CHECK: ret i32
; Sign-bit comparisons against zero are special-cased: the comparison is
; applied to the shadow as well (no warning call), instead of checking the
; whole operand shadow.
387 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
389 define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
390 %1 = icmp slt i32 %x, 0
391 ret i1 %1
392 }
394 ; CHECK: @ICmpSLT
395 ; CHECK: icmp slt
396 ; CHECK-NOT: call void @__msan_warning
397 ; CHECK: icmp slt
398 ; CHECK-NOT: call void @__msan_warning
399 ; CHECK: ret i1
401 define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
402 %1 = icmp sge i32 %x, 0
403 ret i1 %1
404 }
406 ; CHECK: @ICmpSGE
407 ; CHECK: icmp slt
408 ; CHECK-NOT: call void @__msan_warning
409 ; CHECK: icmp sge
410 ; CHECK-NOT: call void @__msan_warning
411 ; CHECK: ret i1
; Same sign-bit test with the constant on the left-hand side.
413 define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
414 %1 = icmp sgt i32 0, %x
415 ret i1 %1
416 }
418 ; CHECK: @ICmpSGT
419 ; CHECK: icmp slt
420 ; CHECK-NOT: call void @__msan_warning
421 ; CHECK: icmp sgt
422 ; CHECK-NOT: call void @__msan_warning
423 ; CHECK: ret i1
425 define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
426 %1 = icmp sle i32 0, %x
427 ret i1 %1
428 }
430 ; CHECK: @ICmpSLE
431 ; CHECK: icmp slt
432 ; CHECK-NOT: call void @__msan_warning
433 ; CHECK: icmp sle
434 ; CHECK-NOT: call void @__msan_warning
435 ; CHECK: ret i1
438 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
439 ; of the vector arguments.
441 define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
442 %1 = icmp slt <2 x i32*> %x, zeroinitializer
443 ret <2 x i1> %1
444 }
446 ; CHECK: @ICmpSLT_vector
447 ; CHECK: icmp slt <2 x i64>
448 ; CHECK-NOT: call void @__msan_warning
449 ; CHECK: icmp slt <2 x i32*>
450 ; CHECK-NOT: call void @__msan_warning
451 ; CHECK: ret <2 x i1>
454 ; Check that we propagate shadow for unsigned relational comparisons with
455 ; constants
457 define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
458 entry:
459 %cmp = icmp ugt i32 %x, 7
460 ret i1 %cmp
461 }
463 ; CHECK: @ICmpUGTConst
464 ; CHECK: icmp ugt i32
465 ; CHECK-NOT: call void @__msan_warning
466 ; CHECK: icmp ugt i32
467 ; CHECK-NOT: call void @__msan_warning
468 ; CHECK: icmp ugt i32
469 ; CHECK-NOT: call void @__msan_warning
470 ; CHECK: ret i1
473 ; Check that loads of shadow have the same alignment as the original loads.
474 ; Check that loads of origin have the alignment of max(4, original alignment).
476 define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
477 %y = alloca i32, align 64
478 %1 = load volatile i32* %y, align 64
479 ret i32 %1
480 }
482 ; CHECK: @ShadowLoadAlignmentLarge
483 ; CHECK: load volatile i32* {{.*}} align 64
484 ; CHECK: load i32* {{.*}} align 64
485 ; CHECK: ret i32
; With alignment below 4, the origin load is bumped up to align 4.
487 define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
488 %y = alloca i32, align 2
489 %1 = load volatile i32* %y, align 2
490 ret i32 %1
491 }
493 ; CHECK: @ShadowLoadAlignmentSmall
494 ; CHECK: load volatile i32* {{.*}} align 2
495 ; CHECK: load i32* {{.*}} align 2
496 ; CHECK-ORIGINS: load i32* {{.*}} align 4
497 ; CHECK: ret i32
500 ; Test vector manipulation instructions.
501 ; Check that the same bit manipulation is applied to the shadow values.
502 ; Check that there is a zero test of the shadow of %idx argument, where present.
504 define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
505 %x = extractelement <4 x i32> %vec, i32 %idx
506 ret i32 %x
507 }
509 ; CHECK: @ExtractElement
510 ; CHECK: extractelement
511 ; CHECK: call void @__msan_warning
512 ; CHECK: extractelement
513 ; CHECK: ret i32
515 define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
516 %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
517 ret <4 x i32> %vec1
518 }
520 ; CHECK: @InsertElement
521 ; CHECK: insertelement
522 ; CHECK: call void @__msan_warning
523 ; CHECK: insertelement
524 ; CHECK: ret <4 x i32>
; shufflevector has a constant mask, so no index check is expected here.
526 define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
527 %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
528 <4 x i32> <i32 0, i32 4, i32 1, i32 5>
529 ret <4 x i32> %vec2
530 }
532 ; CHECK: @ShuffleVector
533 ; CHECK: shufflevector
534 ; CHECK-NOT: call void @__msan_warning
535 ; CHECK: shufflevector
536 ; CHECK: ret <4 x i32>
; bswap shadow is computed by bswap-ing the operand shadow - no checks.
539 ; Test bswap intrinsic instrumentation
540 define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
541 %y = tail call i32 @llvm.bswap.i32(i32 %x)
542 ret i32 %y
543 }
545 declare i32 @llvm.bswap.i32(i32) nounwind readnone
547 ; CHECK: @BSwap
548 ; CHECK-NOT: call void @__msan_warning
549 ; CHECK: @llvm.bswap.i32
550 ; CHECK-NOT: call void @__msan_warning
551 ; CHECK: @llvm.bswap.i32
552 ; CHECK-NOT: call void @__msan_warning
553 ; CHECK: ret i32
; A store-like intrinsic gets a plain (unconditional, align 1) shadow store
; of the operand shadow before the intrinsic call.
556 ; Store intrinsic.
558 define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
559 call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
560 ret void
561 }
563 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
565 ; CHECK: @StoreIntrinsic
566 ; CHECK-NOT: br
567 ; CHECK-NOT: = or
568 ; CHECK: store <4 x i32> {{.*}} align 1
569 ; CHECK: call void @llvm.x86.sse.storeu.ps
570 ; CHECK: ret void
; A load-like intrinsic reads the shadow (and origin) for the loaded memory
; and forwards it to the retval TLS slots.
573 ; Load intrinsic.
575 define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
576 %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
577 ret <16 x i8> %call
578 }
580 declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
582 ; CHECK: @LoadIntrinsic
583 ; CHECK: load <16 x i8>* {{.*}} align 1
584 ; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
585 ; CHECK-NOT: br
586 ; CHECK-NOT: = or
587 ; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
588 ; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
589 ; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
590 ; CHECK: ret <16 x i8>
593 ; Simple NoMem intrinsic
594 ; Check that shadow is OR'ed, and origin is Select'ed
595 ; And no shadow checks!
597 define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
598 %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
599 ret <8 x i16> %call
600 }
602 declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
604 ; CHECK: @Paddsw128
605 ; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
606 ; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
607 ; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
608 ; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
609 ; CHECK-NEXT: = or <8 x i16>
610 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
611 ; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
612 ; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
613 ; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
614 ; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
615 ; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
616 ; CHECK-NEXT: ret <8 x i16>
619 ; Test handling of vectors of pointers.
620 ; Check that shadow of such vector is a vector of integers.
622 define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
623 %x = load <8 x i8*>* %p
624 ret <8 x i8*> %x
625 }
627 ; CHECK: @VectorOfPointers
628 ; CHECK: load <8 x i8*>*
629 ; CHECK: load <8 x i64>*
630 ; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
631 ; CHECK: ret <8 x i8*>
; va_copy is instrumented by zeroing the shadow of the 24-byte va_list.
633 ; Test handling of va_copy.
635 declare void @llvm.va_copy(i8*, i8*) nounwind
637 define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
638 call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
639 ret void
640 }
642 ; CHECK: @VACopy
643 ; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
644 ; CHECK: ret void
647 ; Test that va_start instrumentation does not use va_arg_tls*.
648 ; It should work with a local stack copy instead.
650 %struct.__va_list_tag = type { i32, i32, i8*, i8* }
651 declare void @llvm.va_start(i8*) nounwind
653 ; Function Attrs: nounwind uwtable
654 define void @VAStart(i32 %x, ...) sanitize_memory {
655 entry:
656 %x.addr = alloca i32, align 4
657 %va = alloca [1 x %struct.__va_list_tag], align 16
658 store i32 %x, i32* %x.addr, align 4
659 %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
660 %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
661 call void @llvm.va_start(i8* %arraydecay1)
662 ret void
663 }
665 ; CHECK: @VAStart
666 ; CHECK: call void @llvm.va_start
667 ; CHECK-NOT: @__msan_va_arg_tls
668 ; CHECK-NOT: @__msan_va_arg_overflow_size_tls
669 ; CHECK: ret void
672 ; Test handling of volatile stores.
673 ; Check that MemorySanitizer does not add a check of the value being stored.
675 define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
676 entry:
677 store volatile i32 %x, i32* %p, align 4
678 ret void
679 }
681 ; CHECK: @VolatileStore
682 ; CHECK-NOT: @__msan_warning
683 ; CHECK: ret void
686 ; Test that checks are omitted and returned value is always initialized if
687 ; sanitize_memory attribute is missing.
689 define i32 @NoSanitizeMemory(i32 %x) uwtable {
690 entry:
691 %tobool = icmp eq i32 %x, 0
692 br i1 %tobool, label %if.end, label %if.then
694 if.then: ; preds = %entry
695 tail call void @bar()
696 br label %if.end
698 if.end: ; preds = %entry, %if.then
699 ret i32 %x
700 }
702 declare void @bar()
704 ; CHECK: @NoSanitizeMemory
705 ; CHECK-NOT: @__msan_warning
706 ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
707 ; CHECK-NOT: @__msan_warning
708 ; CHECK: ret i32
; Uninstrumented functions still unpoison their allocas (4 bytes memset to 0
; here) so that instrumented callees see them as initialized.
711 ; Test that stack allocations are unpoisoned in functions missing
712 ; sanitize_memory attribute
714 define i32 @NoSanitizeMemoryAlloca() {
715 entry:
716 %p = alloca i32, align 4
717 %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
718 ret i32 %x
719 }
721 declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
723 ; CHECK: @NoSanitizeMemoryAlloca
724 ; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
725 ; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
726 ; CHECK: ret i32
; In a function without sanitize_memory, even an undef argument must be passed
; with a zero (initialized) param shadow.
729 ; Test that undef is unpoisoned in functions missing
730 ; sanitize_memory attribute
732 define i32 @NoSanitizeMemoryUndef() {
733 entry:
734 %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
735 ret i32 %x
736 }
738 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
; The anchor below previously said @NoSanitizeMemoryAlloca (copy-paste from
; the preceding test), which made FileCheck anchor on the wrong symbol; it
; must name this function so the following directives match inside it.
740 ; CHECK: @NoSanitizeMemoryUndef
741 ; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
742 ; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
743 ; CHECK: ret i32
; Even without sanitize_memory, phi nodes are left intact and the retval
; shadow is stored as zero.
746 ; Test PHINode instrumentation in blacklisted functions
748 define i32 @NoSanitizeMemoryPHI(i32 %x) {
749 entry:
750 %tobool = icmp ne i32 %x, 0
751 br i1 %tobool, label %cond.true, label %cond.false
753 cond.true: ; preds = %entry
754 br label %cond.end
756 cond.false: ; preds = %entry
757 br label %cond.end
759 cond.end: ; preds = %cond.false, %cond.true
760 %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
761 ret i32 %cond
762 }
764 ; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
765 ; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
766 ; CHECK: ret i32 [[A]]
769 ; Test that there are no __msan_param_origin_tls stores when
770 ; argument shadow is a compile-time zero constant (which is always the case
771 ; in functions missing sanitize_memory attribute).
773 define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
774 entry:
775 %0 = load i32* %x, align 4
776 %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
777 ret i32 %call
778 }
780 declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)
782 ; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
783 ; CHECK-NOT: __msan_param_origin_tls
784 ; CHECK: ret i32
; TLS slot accesses for shadow must keep 8-byte alignment even for wider
; vector arguments.
787 ; Test argument shadow alignment
789 define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
790 entry:
791 ret <2 x i64> %b
792 }
794 ; CHECK: @ArgumentShadowAlignment
795 ; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
796 ; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
797 ; CHECK: ret <2 x i64>
800 ; Test origin propagation for insertvalue
802 define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
803 entry:
804 %a = insertvalue { i64, i32 } undef, i64 %x, 0
805 %b = insertvalue { i64, i32 } %a, i32 %y, 1
806 ret { i64, i32 } %b
807 }
809 ; CHECK-ORIGINS: @make_pair_64_32
810 ; First element shadow
811 ; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
812 ; First element origin
813 ; CHECK-ORIGINS: icmp ne i64
814 ; CHECK-ORIGINS: select i1
815 ; First element app value
816 ; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
817 ; Second element shadow
818 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
819 ; Second element origin
820 ; CHECK-ORIGINS: icmp ne i32
821 ; CHECK-ORIGINS: select i1
822 ; Second element app value
823 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
824 ; CHECK-ORIGINS: ret { i64, i32 }
827 ; Test shadow propagation for aggregates passed through ellipsis.
829 %struct.StructByVal = type { i32, i32, i32, i32 }
831 declare void @VAArgStructFn(i32 %guard, ...)
833 define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
834 entry:
835 %agg.tmp2 = alloca %struct.StructByVal, align 8
836 %0 = bitcast %struct.StructByVal* %s to i8*
837 %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
838 %agg.tmp.sroa.0.0.copyload = load i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
839 %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal* %s, i64 0, i32 2
840 %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
841 %agg.tmp.sroa.2.0.copyload = load i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
842 %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
843 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false)
844 call void (i32, ...)* @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
845 ret void
846 }
848 ; "undef" and the first 2 structs go to general purpose registers;
849 ; the third struct goes to the overflow area byval
851 ; CHECK: @VAArgStruct
852 ; undef
853 ; CHECK: store i32 -1, i32* {{.*}}@__msan_va_arg_tls {{.*}}, align 8
854 ; first struct through general purpose registers
855 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
856 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
857 ; second struct through general purpose registers
858 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
859 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
860 ; third struct through the overflow area byval
861 ; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
862 ; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
863 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
864 ; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
865 ; CHECK: call void (i32, ...)* @VAArgStructFn
866 ; CHECK: ret void
; The 'tail' marker survives instrumentation even though the callee's return
; type differs from the caller's.
868 declare i32 @InnerTailCall(i32 %a)
870 define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
871 %b = tail call i32 @InnerTailCall(i32 %a)
872 ret void
873 }
875 ; We used to strip off the 'tail' modifier, but now that we unpoison return slot
876 ; shadow before the call, we don't need to anymore.
878 ; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
879 ; CHECK: tail call i32 @InnerTailCall
880 ; CHECK: ret void
882 declare i32 @InnerMustTailCall(i32 %a)
884 define i32 @MustTailCall(i32 %a) {
885 %b = musttail call i32 @InnerMustTailCall(i32 %a)
886 ret i32 %b
887 }
889 ; Test that 'musttail' is preserved. The ABI should make this work.
891 ; CHECK-LABEL: define i32 @MustTailCall
892 ; CHECK: musttail call i32 @InnerMustTailCall
893 ; CHECK-NEXT: ret i32