
Relative to the first attempt, this contains two changes: First, we only handle the case where one side simplifies to true or false, instead of calling simplification recursively. The previous approach would return poison if one operand simplified to poison (under the equality assumption), which is incorrect. Second, we do not fold llvm.is.constant in simplifyWithOpReplaced(). We may be assuming that a value is constant if the equality holds, but it may not actually be constant. This is nominally just a QoI issue, but the std::list implementation in libstdc++ relies on the precise behavior in a way that causes miscompiles. ----- and/or in logical (select) form benefit from generic simplifications via simplifyWithOpReplaced(). However, the corresponding fold for plain and/or currently does not exist. Similar to selects, there are two general cases for this fold (illustrated with `and`, but there are `or` conjugates). The basic case is something like `(a == b) & c`, where the replacement of a with b or b with a inside c allows it to fold to true or false. Then the whole operation will fold to either false or `a == b`. The second case is something like `(a != b) & c`, where the replacement inside c allows it to fold to false. In that case, the operand can be replaced with c, because in the case where a == b (and thus the icmp is false), c itself will already be false. As the test diffs show, this catches quite a lot of patterns in existing test coverage. This also obsoletes quite a few existing special-case and/or of icmp folds we have (e.g. simplifyAndOrOfICmpsWithLimitConst), but I haven't removed anything as part of this patch in the interest of risk mitigation. Fixes #69050. Fixes #69091.
114 lines
3.8 KiB
LLVM
114 lines
3.8 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=instsimplify -S | FileCheck %s

; Here we add two unsigned values, check that the addition did not underflow
; AND that the result is non-zero. This can be simplified just to a comparison
; between the base and negated offset.

declare void @use8(i8)

; @use1 provides a real user for i1 values; it is not called in the visible
; portion of this file.
declare void @use1(i1)
declare void @llvm.assume(i1)
; If we are checking that the result is not null or no underflow happened,
; it is tautological (always-true).
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 true
;
  ; Assume %base is negative as a signed value (sign bit set), which in
  ; particular makes %base non-zero.
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted) ; keep %adjusted alive with a real use
  ; Per the CHECK lines above, instsimplify folds the disjunction of these
  ; two checks to 'true' under the assumption on %base.
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
; Same as @t1, but with the operands of the unsigned comparison commuted
; (ugt %base, %adjusted instead of ult %adjusted, %base).
define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 true
;
  ; Assume %base is negative as a signed value (sign bit set).
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted) ; keep %adjusted alive with a real use
  ; Per the CHECK lines above, the fold still fires with the commuted
  ; comparison and the whole disjunction simplifies to 'true'.
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; If we are checking that the result is null and underflow happened,
; it is tautological (always-false).
define i1 @t3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 false
;
  ; Assume %base is negative as a signed value (sign bit set), which in
  ; particular makes %base non-zero.
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted) ; keep %adjusted alive with a real use
  ; Inverted predicates relative to @t1 (eq/uge, combined with 'and'); per
  ; the CHECK lines above, the conjunction simplifies to 'false'.
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
; Same as @t3, but with the operands of the unsigned comparison commuted
; (ule %base, %adjusted instead of uge %adjusted, %base).
define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 false
;
  ; Assume %base is negative as a signed value (sign bit set).
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted) ; keep %adjusted alive with a real use
  ; Per the CHECK lines above, the fold still fires with the commuted
  ; comparison and the whole conjunction simplifies to 'false'.
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %adjusted
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; We only need to know that any of the 'add' operands is non-zero,
; not necessarily the one used in the comparison.
; NOTE(review): the CHECK lines below reproduce the full instruction
; sequence, i.e. instsimplify currently does NOT fold this case even though
; the assume makes %offset non-zero — this acts as a negative test.
define i1 @t5(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
  ; Unlike @t1, the assumption is on %offset (the other add operand),
  ; not on %base.
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted) ; keep %adjusted alive with a real use
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}