
As of rev ea222be0d, LLVM's assembler will actually try to honour the "fill value" part of p2align directives. X86 printed these as 0x90, which isn't actually what we want: we want multi-byte nops for .text padding. As a result, since ea222be0d, compiling via a textual assembly file produces single-byte nop padding, while the built-in assembler produces multi-byte nops. This divergent behaviour is undesirable.

To fix: don't set the byte-padding field for x86, which allows the assembler to pick multi-byte nops. Test that we get the same multi-byte padding whether we compile via textual assembly or directly to an object file. Added same-align-bytes-with-llasm-llobj.ll to that effect, and updated numerous other tests to not contain check lines for the explicit padding.
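Concretely, the X86 backend previously emitted `.p2align 4, 0x90` for loop alignment, and since ea222be0d the textual path then honoured the 0x90 fill with single-byte nops; with this change it emits a bare `.p2align 4`, leaving the assembler free to choose multi-byte nops on both paths. The verification idea is to disassemble the object file produced by each route and check that the padding matches. The RUN lines below are only a sketch of that approach, not the actual contents of same-align-bytes-with-llasm-llobj.ll: the llc/llvm-mc/llvm-objdump invocations are standard, but the structure and the checked nop mnemonic are illustrative.

; Path 1 (sketch): textual assembly, then assemble with llvm-mc.
; RUN: llc < %s -mtriple=x86_64-- -o - | llvm-mc -triple=x86_64-- -filetype=obj -o %t1.o -
; RUN: llvm-objdump -d %t1.o | FileCheck %s
; Path 2 (sketch): straight to an object file via the integrated assembler.
; RUN: llc < %s -mtriple=x86_64-- -filetype=obj -o %t2.o
; RUN: llvm-objdump -d %t2.o | FileCheck %s
;
; Both disassemblies should show the same multi-byte nop before the aligned
; loop header; the exact mnemonic below is illustrative only.
; CHECK: nopw %cs:(%rax,%rax)

One of the updated codegen tests follows; note that its `.p2align 4` check line no longer carries the explicit `, 0x90` fill argument.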
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=x86_64- -mcpu=x86-64-v4 | FileCheck %s

define void @PR93000(ptr %a0, ptr %a1, ptr %a2, <32 x i16> %a3) {
; CHECK-LABEL: PR93000:
; CHECK:       # %bb.0: # %Entry
; CHECK-NEXT:    movl (%rdi), %eax
; CHECK-NEXT:    addq $4, %rdi
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: # %Loop
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    kmovd %eax, %k1
; CHECK-NEXT:    knotd %k1, %k2
; CHECK-NEXT:    vpblendmw (%rsi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqu16 (%rdx), %zmm1 {%k2}
; CHECK-NEXT:    vmovdqu64 %zmm1, (%rsi)
; CHECK-NEXT:    movl (%rdi), %eax
; CHECK-NEXT:    addq $4, %rdi
; CHECK-NEXT:    testl %eax, %eax
; CHECK-NEXT:    jne .LBB0_1
; CHECK-NEXT:  # %bb.2: # %Then
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
Entry:
  %pre = load i32, ptr %a0, align 4
  br label %Loop

Loop: ; preds = %Loop, %Entry
  %p = phi i32 [ %limit, %Loop ], [ %pre, %Entry ]
  %lsr.iv.pn = phi ptr [ %lsr.iv, %Loop ], [ %a0, %Entry ]
  %lsr.iv = getelementptr i8, ptr %lsr.iv.pn, i64 4
  %pn = xor i32 %p, -1
  %m = bitcast i32 %p to <32 x i1>
  %mn = bitcast i32 %pn to <32 x i1>
  %mload0 = tail call <32 x i16> @llvm.masked.load.v32i16.p0(ptr %a1, i32 2, <32 x i1> %m, <32 x i16> %a3)
  %mload1 = tail call <32 x i16> @llvm.masked.load.v32i16.p0(ptr %a2, i32 2, <32 x i1> %mn, <32 x i16> %mload0)
  store <32 x i16> %mload1, ptr %a1, align 2
  %limit = load i32, ptr %lsr.iv, align 4
  %icmp = icmp eq i32 %limit, 0
  br i1 %icmp, label %Then, label %Loop

Then: ; preds = %Loop
  ret void
}