; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   < %s | FileCheck %s --check-prefix=POWERPC_64LE
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \
; RUN:   < %s | FileCheck %s --check-prefix=POWERPC_64
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \
; RUN:   < %s | FileCheck %s --check-prefix=POWERPC_32

; Counts how many of the four i16 lanes loaded from %colauths are non-zero:
; compares the vector against zero, bitcasts the <4 x i1> mask to i4, and
; takes its population count.
define i32 @test_Greater_than(ptr %colauths) {
; This testcase is for the special case of zero-vector comparisons.
; Currently the generated code does a comparison (vcmpequh) and then a negation (xxlnor).
; This pattern is expected to be optimized in a future patch.
; POWERPC_64LE-LABEL: test_Greater_than:
; POWERPC_64LE:       # %bb.0: # %entry
; POWERPC_64LE-NEXT:    lfd 0, 0(3)
; POWERPC_64LE-NEXT:    xxlxor 35, 35, 35
; POWERPC_64LE-NEXT:    li 4, 0
; POWERPC_64LE-NEXT:    li 3, 4
; POWERPC_64LE-NEXT:    xxswapd 34, 0
; POWERPC_64LE-NEXT:    vcmpequh 2, 2, 3
; POWERPC_64LE-NEXT:    xxlnor 34, 34, 34
; POWERPC_64LE-NEXT:    vmrglh 3, 2, 2
; POWERPC_64LE-NEXT:    vextuwrx 4, 4, 2
; POWERPC_64LE-NEXT:    vextuwrx 3, 3, 3
; POWERPC_64LE-NEXT:    clrlwi 4, 4, 31
; POWERPC_64LE-NEXT:    rlwimi 4, 3, 1, 30, 30
; POWERPC_64LE-NEXT:    mfvsrwz 3, 35
; POWERPC_64LE-NEXT:    rlwimi 4, 3, 2, 29, 29
; POWERPC_64LE-NEXT:    li 3, 12
; POWERPC_64LE-NEXT:    vextuwrx 3, 3, 3
; POWERPC_64LE-NEXT:    rlwimi 4, 3, 3, 28, 28
; POWERPC_64LE-NEXT:    stb 4, -1(1)
; POWERPC_64LE-NEXT:    lbz 3, -1(1)
; POWERPC_64LE-NEXT:    popcntd 3, 3
; POWERPC_64LE-NEXT:    blr
;
; POWERPC_64-LABEL: test_Greater_than:
; POWERPC_64:       # %bb.0: # %entry
; POWERPC_64-NEXT:    lxsd 2, 0(3)
; POWERPC_64-NEXT:    xxlxor 35, 35, 35
; POWERPC_64-NEXT:    li 4, 12
; POWERPC_64-NEXT:    li 3, 8
; POWERPC_64-NEXT:    vcmpequh 2, 2, 3
; POWERPC_64-NEXT:    xxlnor 34, 34, 34
; POWERPC_64-NEXT:    vmrghh 2, 2, 2
; POWERPC_64-NEXT:    vextuwlx 4, 4, 2
; POWERPC_64-NEXT:    vextuwlx 3, 3, 2
; POWERPC_64-NEXT:    clrlwi 4, 4, 31
; POWERPC_64-NEXT:    rlwimi 4, 3, 1, 30, 30
; POWERPC_64-NEXT:    mfvsrwz 3, 34
; POWERPC_64-NEXT:    rlwimi 4, 3, 2, 29, 29
; POWERPC_64-NEXT:    li 3, 0
; POWERPC_64-NEXT:    vextuwlx 3, 3, 2
; POWERPC_64-NEXT:    rlwimi 4, 3, 3, 28, 28
; POWERPC_64-NEXT:    stb 4, -1(1)
; POWERPC_64-NEXT:    lbz 3, -1(1)
; POWERPC_64-NEXT:    popcntd 3, 3
; POWERPC_64-NEXT:    blr
;
; POWERPC_32-LABEL: test_Greater_than:
; POWERPC_32:       # %bb.0: # %entry
; POWERPC_32-NEXT:    li 4, 4
; POWERPC_32-NEXT:    lxvwsx 1, 0, 3
; POWERPC_32-NEXT:    xxlxor 35, 35, 35
; POWERPC_32-NEXT:    lxvwsx 0, 3, 4
; POWERPC_32-NEXT:    xxmrghw 34, 1, 0
; POWERPC_32-NEXT:    vcmpequh 2, 2, 3
; POWERPC_32-NEXT:    xxlnor 34, 34, 34
; POWERPC_32-NEXT:    vmrghh 2, 2, 2
; POWERPC_32-NEXT:    stxv 34, -32(1)
; POWERPC_32-NEXT:    lwz 3, -20(1)
; POWERPC_32-NEXT:    lwz 4, -24(1)
; POWERPC_32-NEXT:    clrlwi 3, 3, 31
; POWERPC_32-NEXT:    rlwimi 3, 4, 1, 30, 30
; POWERPC_32-NEXT:    lwz 4, -28(1)
; POWERPC_32-NEXT:    rlwimi 3, 4, 2, 29, 29
; POWERPC_32-NEXT:    lwz 4, -32(1)
; POWERPC_32-NEXT:    rlwimi 3, 4, 3, 28, 28
; POWERPC_32-NEXT:    popcntw 3, 3
; POWERPC_32-NEXT:    blr
entry:
  ; Load four halfwords; TBAA tag !5 marks this as a "short" access.
  %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5
  ; Per-lane "not equal to zero" mask — the zero-vector compare that the
  ; header comment says is currently lowered as vcmpequh + xxlnor.
  %1 = icmp ne <4 x i16> %0, zeroinitializer
  ; Pack the <4 x i1> mask into a 4-bit integer, one bit per lane.
  %2 = bitcast <4 x i1> %1 to i4
  ; Population count of the mask; range(i4 0, 5) asserts the result is 0..4.
  %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2)
  ; Widen the 0..4 count to the i32 return value (nneg: value is non-negative).
  %4 = zext nneg i4 %3 to i32
  ret i32 %4
}

; NOTE(review): attribute group #1 is referenced here but not defined in this
; chunk — confirm the `attributes #1 = { ... }` definition exists in the full
; file (update_llc_test_checks normally preserves it).
declare i4 @llvm.ctpop.i4(i4) #1

; TBAA metadata: "short" accesses nested under "omnipotent char" in the
; "Simple C/C++ TBAA" root.
!5 = !{!6, !6, i64 0}
!6 = !{!"short", !7, i64 0}
!7 = !{!"omnipotent char", !8, i64 0}
!8 = !{!"Simple C/C++ TBAA"}