
Reapply after https://github.com/llvm/llvm-project/pull/93956, which changed clang array initialization codegen to avoid size regressions for unoptimized builds.

-----

This fold is subtly incorrect: DL-unaware constant folding does not know the correct index type to use, and simply performs the addition in whatever type happens to already be there. That is wrong, because sext(X) + sext(Y) is generally not the same as sext(X + Y). See the `@constexpr_gep_of_gep_with_narrow_type()` test for a miscompile with the current implementation.

One could try to restrict the fold to cases where no overflow occurs, but I'm not bothering with that here, because DL-aware constant folding will take care of it anyway. I've only kept the straightforward zero-index case, where we just concatenate two GEPs.
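For illustration, here is a minimal sketch of the narrow-index hazard (a hypothetical example with assumed names, not the `@constexpr_gep_of_gep_with_narrow_type()` test from the patch itself): both indices below are i8, so folding the two GEPs by adding the indices in i8 wraps 100 + 100 to -56 before the implicit sign-extension to the 64-bit index width. The correct offset is sext(100) + sext(100) = 200 bytes, but the naive fold would produce sext(100 + 100) = -56 bytes.

; Hypothetical sketch of the miscompile pattern, not taken from the patch.
@g = external global [256 x i8]

define ptr @narrow_index_sketch() {
  ; Adding the i8 indices before sign-extension would yield offset -56
  ; instead of the correct 200.
  %p = getelementptr i8, ptr getelementptr (i8, ptr @g, i8 100), i8 100
  ret ptr %p
}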
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -S -passes=instcombine | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

@buffer = external global [64 x float]

declare void @use(ptr)

define void @f() {
; CHECK-LABEL: define void @f() {
; CHECK-NEXT:    call void @use(ptr getelementptr (i8, ptr getelementptr (i8, ptr @buffer, i64 add (i64 sub (i64 0, i64 ptrtoint (ptr @buffer to i64)), i64 63)), i64 64))
; CHECK-NEXT:    ret void
;
  call void @use(ptr getelementptr (i8, ptr getelementptr (i8, ptr @buffer, i64 add (i64 sub (i64 0, i64 ptrtoint (ptr @buffer to i64)), i64 63)), i64 64))
  ret void
}
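For contrast, a minimal sketch of the zero-index case that was kept (hypothetical names, assuming the outer GEP's leading index is zero): here the two GEPs concatenate by appending index lists, so no index addition takes place and the overflow hazard cannot arise.

; Hypothetical sketch of the retained concatenation fold.
@m = external global [2 x [4 x i32]]

define ptr @concat_zero_index_sketch() {
  ; The outer GEP's first index is 0, so this concatenates to
  ; getelementptr ([2 x [4 x i32]], ptr @m, i64 0, i64 1, i64 2).
  %p = getelementptr [4 x i32], ptr getelementptr ([2 x [4 x i32]], ptr @m, i64 0, i64 1), i64 0, i64 2
  ret ptr %p
}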