Kunqiu Chen 355725a25e
[TSan] Fix missing inst cleanup (#144067)
Commit 44e875ad5b2ce26826dd53f9e7d1a71436c86212 introduced a change that
replaced `ReplaceInstWithInst` with `Instruction::replaceAllUsesWith`,
but without the subsequent instruction cleanup.

As a result, TSan leaves behind dead `load atomic` instructions
after 'replacing' them.

This commit adds the cleanup back, consistent with the surrounding code.
2025-06-18 17:09:32 +08:00
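
For context, the fix restores the erase that `ReplaceInstWithInst` used to perform implicitly. The sketch below shows the intended pattern; it is illustrative C++ in the style of the TSan instrumentation code, not the actual ThreadSanitizer.cpp source, and the helper name and parameters (`lowerAtomicFPLoad`, `TsanAtomicLoad`, `Ordering`) are assumptions for the example:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper: lower an atomic floating-point load into a call to the
// TSan runtime, bitcast the integer result back to the load's type, and erase
// the original instruction so no dead `load atomic` is left behind.
static void lowerAtomicFPLoad(LoadInst *LI, FunctionCallee TsanAtomicLoad,
                              Value *Ordering) {
  IRBuilder<> IRB(LI);
  Value *Call = IRB.CreateCall(TsanAtomicLoad,
                               {LI->getPointerOperand(), Ordering});
  Value *Cast = IRB.CreateBitOrPointerCast(Call, LI->getType());
  // ReplaceInstWithInst did both steps in one call; with
  // Instruction::replaceAllUsesWith the erase must be explicit.
  LI->replaceAllUsesWith(Cast);  // redirect every user of the old load
  LI->eraseFromParent();         // the cleanup this commit adds back
}
```

Without the final `eraseFromParent()`, the dead `load atomic` would remain in the instrumented function and break the autogenerated CHECK-NEXT chains in the test below.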


; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=tsan -S | FileCheck %s
; Check that atomic memory operations on floating-point types are converted to calls into ThreadSanitizer runtime.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

define float @load_float(ptr %fptr) {
; CHECK-LABEL: define float @load_float(
; CHECK-SAME: ptr [[FPTR:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @__tsan_atomic32_load(ptr [[FPTR]], i32 0)
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[TMP2]] to float
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret float [[TMP3]]
;
%v = load atomic float, ptr %fptr unordered, align 4
ret float %v
}

define double @load_double(ptr %fptr) {
; CHECK-LABEL: define double @load_double(
; CHECK-SAME: ptr [[FPTR:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @__tsan_atomic64_load(ptr [[FPTR]], i32 0)
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[TMP2]] to double
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret double [[TMP3]]
;
%v = load atomic double, ptr %fptr unordered, align 8
ret double %v
}

define fp128 @load_fp128(ptr %fptr) {
; CHECK-LABEL: define fp128 @load_fp128(
; CHECK-SAME: ptr [[FPTR:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = call i128 @__tsan_atomic128_load(ptr [[FPTR]], i32 0)
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i128 [[TMP2]] to fp128
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret fp128 [[TMP3]]
;
%v = load atomic fp128, ptr %fptr unordered, align 16
ret fp128 %v
}

define void @store_float(ptr %fptr, float %v) {
; CHECK-LABEL: define void @store_float(
; CHECK-SAME: ptr [[FPTR:%.*]], float [[V:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[V]] to i32
; CHECK-NEXT: call void @__tsan_atomic32_store(ptr [[FPTR]], i32 [[TMP2]], i32 0)
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret void
;
store atomic float %v, ptr %fptr unordered, align 4
ret void
}

define void @store_double(ptr %fptr, double %v) {
; CHECK-LABEL: define void @store_double(
; CHECK-SAME: ptr [[FPTR:%.*]], double [[V:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[V]] to i64
; CHECK-NEXT: call void @__tsan_atomic64_store(ptr [[FPTR]], i64 [[TMP2]], i32 0)
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret void
;
store atomic double %v, ptr %fptr unordered, align 8
ret void
}

define void @store_fp128(ptr %fptr, fp128 %v) {
; CHECK-LABEL: define void @store_fp128(
; CHECK-SAME: ptr [[FPTR:%.*]], fp128 [[V:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: call void @__tsan_func_entry(ptr [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = bitcast fp128 [[V]] to i128
; CHECK-NEXT: call void @__tsan_atomic128_store(ptr [[FPTR]], i128 [[TMP2]], i32 0)
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK-NEXT: ret void
;
store atomic fp128 %v, ptr %fptr unordered, align 16
ret void
}