Nikita Popov 90ba33099c
[InstCombine] Canonicalize constant GEPs to i8 source element type (#68882)
This patch canonicalizes getelementptr instructions with constant
indices to use the `i8` source element type. This makes it easier for
optimizations to recognize that two GEPs are identical, because they
don't need to see past many different ways to express the same offset.
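
For example (a hypothetical illustration, assuming the usual data layout
where i32 is four bytes), a constant GEP such as

  %p = getelementptr i32, ptr %base, i64 3

is now emitted in the equivalent byte-offset form

  %p = getelementptr i8, ptr %base, i64 12

so that GEPs computing the same offset through different source element
types become syntactically identical.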

This is a first step towards
https://discourse.llvm.org/t/rfc-replacing-getelementptr-with-ptradd/68699.
This is limited to constant GEPs only for now, as they have a clear
canonical form, while we're not yet sure how exactly to deal with
variable indices.

The test llvm/test/Transforms/PhaseOrdering/switch_with_geps.ll gives
two representative examples of the kind of optimization improvement we
expect from this change. In the first test SimplifyCFG can now realize
that all switch branches are actually the same. In the second test it
can convert the switch into simple arithmetic. These are representative of
common optimization failures we see in Rust.

Fixes https://github.com/llvm/llvm-project/issues/69841.
2024-01-24 15:25:29 +01:00


; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Exercise folding of strncmp calls with constant arrays and nonconstant
; sizes.
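;
; In the folded output below, a call whose constant arrays first differ at
; byte index I is reduced to a test on the length: N > I ? +/-1 : 0
; (written as N != 0 when I == 0), materialized as icmp ugt/ne plus
; sext (-1) or zext (+1).  Note also that the i32 GEPs on %pcmp appear in
; the CHECK lines as i8 GEPs with byte offsets (e.g. element 1 becomes
; offset 4), reflecting the canonicalization introduced by this commit.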
declare i32 @strncmp(ptr, ptr, i64)
@ax = external constant [8 x i8]
@a01230123 = constant [8 x i8] c"01230123"
@b01230123 = constant [8 x i8] c"01230123"
@c01230129 = constant [8 x i8] c"01230129"
@d9123_12 = constant [7 x i8] c"9123\0012"
@e9123_34 = constant [7 x i8] c"9123\0034"
; Exercise strncmp(A, B, N) folding of arrays with the same bytes.
define void @fold_strncmp_a_b_n(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_a_b_n(
; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT: [[C0_1:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: [[S0_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 [[C0_1]], ptr [[S0_1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_2:%.*]] = sext i1 [[TMP2]] to i32
; CHECK-NEXT: [[S0_2:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 [[C0_2]], ptr [[S0_2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_3:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT: [[S0_3:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 [[C0_3]], ptr [[S0_3]], align 4
; CHECK-NEXT: [[S0_4:%.*]] = getelementptr i8, ptr [[PCMP]], i64 16
; CHECK-NEXT: store i32 0, ptr [[S0_4]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_5:%.*]] = sext i1 [[TMP4]] to i32
; CHECK-NEXT: [[S0_5:%.*]] = getelementptr i8, ptr [[PCMP]], i64 20
; CHECK-NEXT: store i32 [[C0_5]], ptr [[S0_5]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C5_0:%.*]] = zext i1 [[TMP5]] to i32
; CHECK-NEXT: [[S5_0:%.*]] = getelementptr i8, ptr [[PCMP]], i64 24
; CHECK-NEXT: store i32 [[C5_0]], ptr [[S5_0]], align 4
; CHECK-NEXT: ret void
;
%q1 = getelementptr [8 x i8], ptr @b01230123, i64 0, i64 1
%q2 = getelementptr [8 x i8], ptr @b01230123, i64 0, i64 2
%q3 = getelementptr [8 x i8], ptr @b01230123, i64 0, i64 3
%q4 = getelementptr [8 x i8], ptr @b01230123, i64 0, i64 4
%q5 = getelementptr [8 x i8], ptr @b01230123, i64 0, i64 5
; Fold strncmp(a, b, n) to 0.
%c0_0 = call i32 @strncmp(ptr @a01230123, ptr @b01230123, i64 %n)
store i32 %c0_0, ptr %pcmp
; Fold strncmp(a, b + 1, n) to N != 0 ? -1 : 0.
%c0_1 = call i32 @strncmp(ptr @a01230123, ptr %q1, i64 %n)
%s0_1 = getelementptr i32, ptr %pcmp, i64 1
store i32 %c0_1, ptr %s0_1
; Fold strncmp(a, b + 2, n) to N != 0 ? -1 : 0.
%c0_2 = call i32 @strncmp(ptr @a01230123, ptr %q2, i64 %n)
%s0_2 = getelementptr i32, ptr %pcmp, i64 2
store i32 %c0_2, ptr %s0_2
; Fold strncmp(a, b + 3, n) to N != 0 ? -1 : 0.
%c0_3 = call i32 @strncmp(ptr @a01230123, ptr %q3, i64 %n)
%s0_3 = getelementptr i32, ptr %pcmp, i64 3
store i32 %c0_3, ptr %s0_3
; Fold strncmp(a, b + 4, n) to 0.
%c0_4 = call i32 @strncmp(ptr @a01230123, ptr %q4, i64 %n)
%s0_4 = getelementptr i32, ptr %pcmp, i64 4
store i32 %c0_4, ptr %s0_4
; Fold strncmp(a, b + 5, n) to N != 0 ? -1 : 0.
%c0_5 = call i32 @strncmp(ptr @a01230123, ptr %q5, i64 %n)
%s0_5 = getelementptr i32, ptr %pcmp, i64 5
store i32 %c0_5, ptr %s0_5
; Fold strncmp(b + 5, a, n) to N != 0 ? +1 : 0.
%c5_0 = call i32 @strncmp(ptr %q5, ptr @a01230123, i64 %n)
%s5_0 = getelementptr i32, ptr %pcmp, i64 6
store i32 %c5_0, ptr %s5_0
ret void
}
; Verify that a strncmp() call involving a constant array with unknown
; contents is not folded.
define void @call_strncmp_a_ax_n(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @call_strncmp_a_ax_n(
; CHECK-NEXT: [[C0_0:%.*]] = call i32 @strncmp(ptr nonnull @a01230123, ptr nonnull @ax, i64 [[N:%.*]])
; CHECK-NEXT: store i32 [[C0_0]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: ret void
;
; Do not fold strncmp(a, ax, n).
%c0_0 = call i32 @strncmp(ptr @a01230123, ptr @ax, i64 %n)
store i32 %c0_0, ptr %pcmp
ret void
}
; Exercise strncmp(A, C, N) folding of arrays with the same leading bytes
; but a difference in the trailing byte.
define void @fold_strncmp_a_c_n(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_a_c_n(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N:%.*]], 7
; CHECK-NEXT: [[C0_0:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[C0_0]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_1:%.*]] = sext i1 [[TMP2]] to i32
; CHECK-NEXT: [[S0_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 [[C0_1]], ptr [[S0_1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_2:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT: [[S0_2:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 [[C0_2]], ptr [[S0_2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_3:%.*]] = sext i1 [[TMP4]] to i32
; CHECK-NEXT: [[S0_3:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 [[C0_3]], ptr [[S0_3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 [[N]], 3
; CHECK-NEXT: [[C0_4:%.*]] = sext i1 [[TMP5]] to i32
; CHECK-NEXT: [[S0_4:%.*]] = getelementptr i8, ptr [[PCMP]], i64 16
; CHECK-NEXT: store i32 [[C0_4]], ptr [[S0_4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_5:%.*]] = sext i1 [[TMP6]] to i32
; CHECK-NEXT: [[S0_5:%.*]] = getelementptr i8, ptr [[PCMP]], i64 20
; CHECK-NEXT: store i32 [[C0_5]], ptr [[S0_5]], align 4
; CHECK-NEXT: ret void
;
%q1 = getelementptr [8 x i8], ptr @c01230129, i64 0, i64 1
%q2 = getelementptr [8 x i8], ptr @c01230129, i64 0, i64 2
%q3 = getelementptr [8 x i8], ptr @c01230129, i64 0, i64 3
%q4 = getelementptr [8 x i8], ptr @c01230129, i64 0, i64 4
%q5 = getelementptr [8 x i8], ptr @c01230129, i64 0, i64 5
; Fold strncmp(a, c, n) to N > 7 ? -1 : 0.
%c0_0 = call i32 @strncmp(ptr @a01230123, ptr @c01230129, i64 %n)
store i32 %c0_0, ptr %pcmp
; Fold strncmp(a, c + 1, n) to N != 0 ? -1 : 0.
%c0_1 = call i32 @strncmp(ptr @a01230123, ptr %q1, i64 %n)
%s0_1 = getelementptr i32, ptr %pcmp, i64 1
store i32 %c0_1, ptr %s0_1
; Fold strncmp(a, c + 2, n) to N != 0 ? -1 : 0.
%c0_2 = call i32 @strncmp(ptr @a01230123, ptr %q2, i64 %n)
%s0_2 = getelementptr i32, ptr %pcmp, i64 2
store i32 %c0_2, ptr %s0_2
; Fold strncmp(a, c + 3, n) to N != 0 ? -1 : 0.
%c0_3 = call i32 @strncmp(ptr @a01230123, ptr %q3, i64 %n)
%s0_3 = getelementptr i32, ptr %pcmp, i64 3
store i32 %c0_3, ptr %s0_3
; Fold strncmp(a, c + 4, n) to N > 3 ? -1 : 0.
%c0_4 = call i32 @strncmp(ptr @a01230123, ptr %q4, i64 %n)
%s0_4 = getelementptr i32, ptr %pcmp, i64 4
store i32 %c0_4, ptr %s0_4
; Fold strncmp(a, c + 5, n) to N != 0 ? -1 : 0.
%c0_5 = call i32 @strncmp(ptr @a01230123, ptr %q5, i64 %n)
%s0_5 = getelementptr i32, ptr %pcmp, i64 5
store i32 %c0_5, ptr %s0_5
ret void
}
; Exercise strncmp(A, D, N) folding of arrays of different sizes that
; differ in the leading byte.
define void @fold_strncmp_a_d_n(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_a_d_n(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT: [[C0_0:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[C0_0]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C0_1:%.*]] = sext i1 [[TMP2]] to i32
; CHECK-NEXT: [[S0_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 [[C0_1]], ptr [[S0_1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[N]], 3
; CHECK-NEXT: [[C1_1:%.*]] = zext i1 [[TMP3]] to i32
; CHECK-NEXT: [[S1_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 [[C1_1]], ptr [[S1_1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[N]], 2
; CHECK-NEXT: [[C2_2:%.*]] = zext i1 [[TMP4]] to i32
; CHECK-NEXT: [[S2_2:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 [[C2_2]], ptr [[S2_2]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C4_4:%.*]] = zext i1 [[TMP5]] to i32
; CHECK-NEXT: [[S4_4:%.*]] = getelementptr i8, ptr [[PCMP]], i64 16
; CHECK-NEXT: store i32 [[C4_4]], ptr [[S4_4]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C4_4_2:%.*]] = sext i1 [[TMP6]] to i32
; CHECK-NEXT: [[S4_4_2:%.*]] = getelementptr i8, ptr [[PCMP]], i64 20
; CHECK-NEXT: store i32 [[C4_4_2]], ptr [[S4_4_2]], align 4
; CHECK-NEXT: [[S5_5:%.*]] = getelementptr i8, ptr [[PCMP]], i64 24
; CHECK-NEXT: store i32 0, ptr [[S5_5]], align 4
; CHECK-NEXT: [[S6_6:%.*]] = getelementptr i8, ptr [[PCMP]], i64 28
; CHECK-NEXT: store i32 0, ptr [[S6_6]], align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 1
%p2 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 2
%p3 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 3
%p4 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 4
%p5 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 5
%p6 = getelementptr [8 x i8], ptr @a01230123, i64 0, i64 6
%q1 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 1
%q2 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 2
%q3 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 3
%q4 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 4
%q5 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 5
%q6 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 6
; Fold strncmp(a, d, n) to N != 0 ? -1 : 0.
%c0_0 = call i32 @strncmp(ptr @a01230123, ptr @d9123_12, i64 %n)
store i32 %c0_0, ptr %pcmp
; Fold strncmp(a, d + 1, n) to N != 0 ? -1 : 0.
%c0_1 = call i32 @strncmp(ptr @a01230123, ptr %q1, i64 %n)
%s0_1 = getelementptr i32, ptr %pcmp, i64 1
store i32 %c0_1, ptr %s0_1
; Fold strncmp(a + 1, d + 1, n) to N > 3 ? +1 : 0.
%c1_1 = call i32 @strncmp(ptr %p1, ptr %q1, i64 %n)
%s1_1 = getelementptr i32, ptr %pcmp, i64 2
store i32 %c1_1, ptr %s1_1
; Fold strncmp(a + 2, d + 2, n) to N > 2 ? +1 : 0.
%c2_2 = call i32 @strncmp(ptr %p2, ptr %q2, i64 %n)
%s2_2 = getelementptr i32, ptr %pcmp, i64 3
store i32 %c2_2, ptr %s2_2
; Fold strncmp(a + 3, d + 3, n) to N > 1 ? +1 : 0.
%c3_3 = call i32 @strncmp(ptr %p3, ptr %q3, i64 %n)
%s3_3 = getelementptr i32, ptr %pcmp, i64 4
store i32 %c3_3, ptr %s3_3
; Fold strncmp(a + 4, d + 4, n) to N != 0 ? +1 : 0.
%c4_4 = call i32 @strncmp(ptr %p4, ptr %q4, i64 %n)
%s4_4 = getelementptr i32, ptr %pcmp, i64 4
store i32 %c4_4, ptr %s4_4
; Fold strncmp(d + 4, a + 4, n) to N != 0 ? -1 : 0 (same as above but
; with the array arguments reversed).
%c4_4_2 = call i32 @strncmp(ptr %q4, ptr %p4, i64 %n)
%s4_4_2 = getelementptr i32, ptr %pcmp, i64 5
store i32 %c4_4_2, ptr %s4_4_2
; Fold strncmp(a + 5, d + 5, n) to 0.
%c5_5 = call i32 @strncmp(ptr %p5, ptr %q5, i64 %n)
%s5_5 = getelementptr i32, ptr %pcmp, i64 6
store i32 %c5_5, ptr %s5_5
; Fold strncmp(a + 6, d + 6, n) to 0.
%c6_6 = call i32 @strncmp(ptr %p6, ptr %q6, i64 %n)
%s6_6 = getelementptr i32, ptr %pcmp, i64 7
store i32 %c6_6, ptr %s6_6
ret void
}
; Exercise strncmp(A, D, N) folding of arrays that differ in the leading
; byte when the size is known to be nonzero.
define void @fold_strncmp_a_d_nz(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_a_d_nz(
; CHECK-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: ret void
;
%nz = or i64 %n, 1
%c0_0 = call i32 @strncmp(ptr @a01230123, ptr @d9123_12, i64 %nz)
store i32 %c0_0, ptr %pcmp
ret void
}
; Exercise strncmp(D, E, N) folding of equal strings but unequal arrays.
define void @fold_strncmp_d_e_n(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_strncmp_d_e_n(
; CHECK-NEXT: store i32 0, ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT: [[C0_1:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[S0_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 [[C0_1]], ptr [[S0_1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[C1_0:%.*]] = sext i1 [[TMP2]] to i32
; CHECK-NEXT: [[S1_0:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 [[C1_0]], ptr [[S1_0]], align 4
; CHECK-NEXT: [[S1_1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 0, ptr [[S1_1]], align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr [7 x i8], ptr @d9123_12, i64 0, i64 1
%q1 = getelementptr [7 x i8], ptr @e9123_34, i64 0, i64 1
; Fold strncmp(d, e, n) to 0.
%c0_0 = call i32 @strncmp(ptr @d9123_12, ptr @e9123_34, i64 %n)
store i32 %c0_0, ptr %pcmp
; Fold strncmp(d, e + 1, n) to N != 0 ? +1 : 0.
%c0_1 = call i32 @strncmp(ptr @d9123_12, ptr %q1, i64 %n)
%s0_1 = getelementptr i32, ptr %pcmp, i64 1
store i32 %c0_1, ptr %s0_1
; Fold strncmp(d + 1, e, n) to N != 0 ? -1 : 0.
%c1_0 = call i32 @strncmp(ptr %p1, ptr @e9123_34, i64 %n)
%s1_0 = getelementptr i32, ptr %pcmp, i64 2
store i32 %c1_0, ptr %s1_0
; Fold strncmp(d + 1, e + 1, n) to 0.
%c1_1 = call i32 @strncmp(ptr %p1, ptr %q1, i64 %n)
%s1_1 = getelementptr i32, ptr %pcmp, i64 3
store i32 %c1_1, ptr %s1_1
ret void
}