[Clang][RISCV] Add custom TableGen backend for riscv-vector intrinsics.
Demonstrate how to generate vadd/vfadd intrinsic functions:

1. Add -gen-riscv-vector-builtins for clang builtins.
2. Add -gen-riscv-vector-builtin-codegen for clang codegen.
3. Add -gen-riscv-vector-header for riscv_vector.h. It also generates
   ifdef directives with extension checking, based on D94403.
4. Add -gen-riscv-vector-generic-header for riscv_vector_generic.h, which
   provides the overloaded versions of the intrinsics for the generic API.
   https://github.com/riscv/rvv-intrinsic-doc/blob/master/rvv-intrinsic-rfc.md#c11-generic-interface
5. Update the TableGen documentation for the RISC-V related options.

riscv_vector.td also defines some type transformers that vadd does not use,
because they demonstrate how type transformers work and will be needed when
the full set of intrinsic functions is implemented in the future.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>

Reviewed By: jrtc27, craig.topper, HsiangKai, Jim, Paul-C-Anagnostopoulos

Differential Revision: https://reviews.llvm.org/D95016
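As a minimal usage sketch of the generic interface mentioned above (it mirrors the vfadd test added in this commit; the explicitly named form such as vfadd_vv_f32m1 is assumed to follow the rvv-intrinsic-doc naming convention and is not shown in this diff):

#include <riscv_vector_generic.h>

vfloat32m1_t add_generic(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  // The overloaded vfadd resolves on the operand types; the explicitly
  // typed header would expose the same operation as vfadd_vv_f32m1.
  return vfadd(a, b, vl);
}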
@@ -15,186 +15,5 @@
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif

#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)
#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, ATTRS, "experimental-v")
#endif
#include "clang/Basic/riscv_vector_builtins.inc"

RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_vl, "q8Scq8ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_vl, "q16Scq16ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_vl, "q32Scq32ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_vl, "q64Scq64ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n")
|
||||
RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n")

#undef BUILTIN
#undef RISCVV_BUILTIN
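For reference, the signature strings in the listing above use Clang's Builtins.def type encoding ('q<N>' scalable vector of N elements, 'Sc'/'Ss'/'Si' signed char/short/int, 'Wi' int64_t, 'U' unsigned, 'b' bool, 'z' size_t, "n" nothrow). A rough decoding of one entry, as a sketch (vint32m1_t is assumed to come from the generated riscv_vector.h):

#include <stddef.h>
#include <riscv_vector.h>

// RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n")
//   "q2Si" : scalable vector of 2 signed ints -> vint32m1_t
//   "z"    : size_t (the trailing vl operand)
// So the builtin behaves roughly like:
//   vint32m1_t __builtin_rvv_vadd_vv_i32m1_vl(vint32m1_t, vint32m1_t, size_t);
vint32m1_t add_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return __builtin_rvv_vadd_vv_i32m1_vl(op1, op2, vl);
}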
@@ -84,3 +84,9 @@ clang_tablegen(arm_cde_builtin_sema.inc -gen-arm-cde-builtin-sema
clang_tablegen(arm_cde_builtin_aliases.inc -gen-arm-cde-builtin-aliases
  SOURCE arm_cde.td
  TARGET ClangARMCdeBuiltinAliases)
clang_tablegen(riscv_vector_builtins.inc -gen-riscv-vector-builtins
  SOURCE riscv_vector.td
  TARGET ClangRISCVVectorBuiltins)
clang_tablegen(riscv_vector_builtin_cg.inc -gen-riscv-vector-builtin-codegen
  SOURCE riscv_vector.td
  TARGET ClangRISCVVectorBuiltinCG)
210 clang/include/clang/Basic/riscv_vector.td (new file)
@@ -0,0 +1,210 @@
//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the builtins for RISC-V V-extension. See:
//
//   https://github.com/riscv/rvv-intrinsic-doc
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction definitions
//===----------------------------------------------------------------------===//
// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
// "vadd_vv_i32m2", etc).
//
// The elements of this collection are defined by an instantiation process
// whose range is specified by the cross product of the LMUL attribute and
// every element in the attribute TypeRange. By default builtins have LMUL = [1,
// 2, 4, 8, 1/2, 1/4, 1/8], so the process is repeated 7 times. In TableGen we
// use Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
//
// LMUL represents the fact that the types of values used by that builtin are
// values generated by instructions that are executed under that LMUL. However,
// this does not mean the builtin is necessarily lowered into an instruction
// that executes under the specified LMUL. An example where this happens is
// loads and stores of masks. A mask like `vbool8_t` can be generated, for
// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
// be performed under LMUL=1 because mask registers are not grouped.
//
// TypeRange is a non-empty sequence of basic types:
//
//   c: int8_t (i8)
//   s: int16_t (i16)
//   i: int32_t (i32)
//   l: int64_t (i64)
//   h: float16_t (half)
//   f: float32_t (float)
//   d: float64_t (double)
//
// This way, given an LMUL, a record with a TypeRange "sil" will cause the
// definition of 3 builtins. Each type "t" in the TypeRange (in this example
// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
// definition of that particular builtin (for the given LMUL).
//
// During the instantiation, types can be transformed or modified using type
// transformers. Given a type "t", the following primitive type transformers
// can be applied to it to yield another type.
//
//   e: type of "t" as is (identity)
//   v: computes a vector type whose element type is "t" for the current LMUL
//   w: computes a vector type identical to what 'v' computes except for the
//      element type, which is twice as wide as the element type of 'v'
//   q: computes a vector type identical to what 'v' computes except for the
//      element type, which is four times as wide as the element type of 'v'
//   o: computes a vector type identical to what 'v' computes except for the
//      element type, which is eight times as wide as the element type of 'v'
//   m: computes a vector type identical to what 'v' computes except for the
//      element type, which is bool
//   0: void type, ignores "t"
//   z: size_t, ignores "t"
//   t: ptrdiff_t, ignores "t"
//   c: uint8_t, ignores "t"
//
// So for instance, if t is "i", i.e. int, then "e" will yield int again. "v"
// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
// Accordingly, "w" would yield __rvv_int64m2_t.
//
// A type transformer can be prefixed by other non-primitive type transformers.
//
//   P: constructs a pointer to the current type
//   C: adds const to the type
//   K: requires the integer type to be a constant expression
//   U: given an integer type or vector type, computes its unsigned variant
//   I: given a vector type, computes the vector type with integer type
//      elements of the same width
//   F: given a vector type, computes the vector type with floating-point type
//      elements of the same width
//   S: given a vector type, computes its equivalent one for LMUL=1. This is a
//      no-op if the vector was already LMUL=1
//
// Following the example above, if t is "i", then "Ue" will yield unsigned
// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), "Fw" would
// yield __rvv_float64m2_t, etc.
//
// Each builtin is then defined by applying each type in TypeRange against the
// sequence of type transformers described in Suffix and Prototype.
//
// The name of the builtin is defined by the Name attribute (which defaults to
// the name of the class) with the Suffix attribute appended, separated by an
// underscore. For instance, with Name="foo", Suffix = "v" and TypeRange = "il",
// the builtins generated will be __builtin_rvv_foo_i32m1 and
// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
// type transformer (say "vv"), each of the types is separated with an
// underscore, as in "__builtin_rvv_foo_i32m1_i32m1".
//
// The C/C++ prototype of the builtin is defined by the Prototype attribute.
// Prototype is a non-empty sequence of type transformers, the first of which
// is the return type of the builtin and the rest are the parameters of the
// builtin, in order. For instance, if Prototype is "wvv" and TypeRange is "si",
// the first builtin will have type
// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
// under LMUL=1).
//
// There are a number of attributes that are used to constrain the number and
// shape of the builtins generated. Refer to the comments below for them.
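As a concrete reading of the "wvv"/"si" example in the comment above, a hypothetical record named foo would produce declarations along these lines (a sketch only; the names and types follow the rules described here, not actual generated output):

// t = "s" (int16_t): "w" -> __rvv_int32m2_t, "v" -> __rvv_int16m1_t
__rvv_int32m2_t __builtin_rvv_foo_i16m1(__rvv_int16m1_t op1,
                                        __rvv_int16m1_t op2);

// t = "i" (int32_t): "w" -> __rvv_int64m2_t, "v" -> __rvv_int32m1_t
__rvv_int64m2_t __builtin_rvv_foo_i32m1(__rvv_int32m1_t op1,
                                        __rvv_int32m1_t op2);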
class RVVBuiltin<string suffix, string prototype, string type_range,
                 string managed_suffix = ""> {
  // Base name that will be prefixed with __builtin_rvv_ and suffixed with
  // the computed Suffix.
  string Name = NAME;

  // If not empty, each instantiated builtin will have this appended after an
  // underscore (_). It is instantiated like Prototype.
  string Suffix = suffix;

  // If empty, the default MangledName is the substring of `Name` up to the
  // first '_'. For example, the default mangled name for Name `vadd_vv` is
  // `vadd`. It is used to describe some special naming cases.
  string MangledName = "";

  // The different variants of the builtin, parameterised with a type.
  string TypeRange = type_range;

  // We use each type described in TypeRange and LMUL with prototype to
  // instantiate a specific element of the set of builtins being defined.
  // The Prototype attribute defines the C/C++ prototype of the builtin. It is
  // a non-empty sequence of type transformers, the first of which is the
  // return type of the builtin and the rest are the parameters of the
  // builtin, in order. For instance, if Prototype is "wvv", TypeRange is "si"
  // and LMUL=1, the first builtin will have type
  // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
  // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
  string Prototype = prototype;

  // This builtin has a masked form.
  bit HasMask = true;

  // If HasMask, this flag states that this builtin has a maskedoff operand.
  // It is always the first operand of both the builtin and the IR intrinsic.
  bit HasMaskedOffOperand = true;

  // This builtin has a granted vector length parameter in the last position.
  bit HasVL = true;

  // This builtin supports function overloading and has a mangled name.
  bit HasGeneric = true;

  // Reads or writes "memory" or has other side-effects.
  bit HasSideEffects = false;

  // This builtin is valid for the given Log2LMULs.
  list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];

  // Emit the automatic clang codegen. It describes which types we have to use
  // to obtain the specific LLVM intrinsic. -1 means the return type;
  // otherwise, k >= 0 means the k-th operand (counting from zero) of the
  // unmasked version. k cannot be the mask operand's position.
  list<int> IntrinsicTypes = [];

  // If this name is not empty, it is the ID of the LLVM intrinsic
  // we want to lower to.
  string IRName = NAME;

  // If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
  string IRNameMask = NAME # "_mask";
}

//===----------------------------------------------------------------------===//
// Basic classes with automatic codegen.
//===----------------------------------------------------------------------===//

class RVVBinBuiltin<string suffix, string prototype, string type_range>
    : RVVBuiltin<suffix, prototype, type_range> {
  let IntrinsicTypes = [-1, 1];
}

multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
                            list<list<string>> suffixes_prototypes> {
  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
    foreach s_p = suffixes_prototypes in {
      let Name = NAME # "_" # s_p[0] in {
        defvar suffix = s_p[1];
        defvar prototype = s_p[2];
        def : RVVBinBuiltin<suffix, prototype, type_range>;
      }
    }
  }
}

// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
defm vadd : RVVBinBuiltinSet<"vadd", "csil",
                             [["vv", "v", "vvv"],
                              ["vx", "v", "vve"],
                              ["vv", "Uv", "UvUvUv"],
                              ["vx", "Uv", "UvUvUe"]]>;

// 14. Vector Floating-Point Instructions
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm vfadd : RVVBinBuiltinSet<"vfadd", "fd",
                              [["vv", "v", "vvv"],
                               ["vf", "v", "vve"]]>;
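Each [name-suffix, Suffix, Prototype] triple above selects one variant. For instance, the ["vx", "v", "vve"] entry of vadd (with t = "i", LMUL=1) yields a builtin whose second source operand is a scalar, matching the __builtin_rvv_vadd_vx_i32m1_vl entry ("q2Siq2SiSiz") in the builtin list earlier in this commit. A sketch of how it is used (vint32m1_t is assumed to come from the generated riscv_vector.h):

#include <stddef.h>
#include <riscv_vector.h>

// "vve": vector result, vector op1, scalar ("e" = element type) op2,
// plus the trailing vl operand added because HasVL is true.
vint32m1_t add_scalar_i32m1(vint32m1_t op1, int op2, size_t vl) {
  return __builtin_rvv_vadd_vx_i32m1_vl(op1, op2, vl);
}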
@@ -17820,196 +17820,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
  // Required for overloaded intrinsics.
  llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
  switch (BuiltinID) {
  // We could generate all the possible combinations and handling code in
  // a file and include it here, instead of listing all the builtins plainly.
  // Something like
  //   #include clang/Basic/RISCVVBuiltinCodeGen.inc
case RISCV::BI__builtin_rvv_vadd_vv_i8m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m1_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m8_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_vl:
    // The order of operands is (op1, op2, vl).
    ID = Intrinsic::riscv_vadd;
    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
    break;
case RISCV::BI__builtin_rvv_vadd_vv_i8m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i64m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i64m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u64m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m1_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u64m8_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_m_vl:
|
||||
case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_m_vl:
    ID = Intrinsic::riscv_vadd_mask;
    // The order of operands is (mask, maskedoff, op1, op2, vl).
    IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()};
    // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl).
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    break;
#include "clang/Basic/riscv_vector_builtin_cg.inc"
  }

  assert(ID != Intrinsic::not_intrinsic);
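The rotate above is what the masked tests exercise: at the C level the masked builtin takes (mask, maskedoff, op1, op2, vl), while the @llvm.riscv.*.mask intrinsic takes (maskedoff, op1, op2, mask, vl), as the vfadd.mask CHECK lines in the new tests show. A sketch for the vadd case (the builtin name and operand order are taken from the builtin listing in this commit; vbool32_t/vint32m1_t are assumed to come from the generated riscv_vector.h):

#include <stddef.h>
#include <riscv_vector.h>

// C-level order: (mask, maskedoff, op1, op2, vl); lowered to
// @llvm.riscv.vadd.mask.*(maskedoff, op1, op2, mask, vl).
vint32m1_t add_masked(vbool32_t mask, vint32m1_t maskedoff,
                      vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return __builtin_rvv_vadd_vv_i32m1_m_vl(mask, maskedoff, op1, op2, vl);
}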
@@ -209,6 +209,10 @@ clang_generate_header(-gen-arm-bf16 arm_bf16.td arm_bf16.h)
clang_generate_header(-gen-arm-mve-header arm_mve.td arm_mve.h)
# Generate arm_cde.h
clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h)
# Generate riscv_vector.h
clang_generate_header(-gen-riscv-vector-header riscv_vector.td riscv_vector.h)
# Generate riscv_vector_generic.h
clang_generate_header(-gen-riscv-vector-generic-header riscv_vector.td riscv_vector_generic.h)

add_custom_target(clang-resource-headers ALL DEPENDS ${out_files})
set_target_properties(clang-resource-headers PROPERTIES
2476 clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c (new file; diff suppressed because it is too large)
516 clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c (new file)
@@ -0,0 +1,516 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// ASM-NOT: warning
#include <riscv_vector_generic.h>

// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfadd(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfadd(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfadd(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd_m(mask, maskedoff, op1, op2, vl);
}

2476  clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c  (new file; diff suppressed because it is too large)
516   clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c  (new file)
@@ -0,0 +1,516 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfadd_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfadd_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfadd_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfadd_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfadd_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfadd_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfadd_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfadd_vv_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}

(File diff suppressed because it is too large.)
6  clang/test/Headers/riscv-vector-header.c  (new file)
@@ -0,0 +1,6 @@
// RUN: %clang_cc1 -triple riscv64 -fsyntax-only \
// RUN:   -target-feature +m -target-feature +a -target-feature +f \
// RUN:   -target-feature +d -target-feature +experimental-v %s
// expected-no-diagnostics

#include <riscv_vector.h>
@@ -18,6 +18,7 @@ add_tablegen(clang-tblgen CLANG
  ClangTypeNodesEmitter.cpp
  MveEmitter.cpp
  NeonEmitter.cpp
  RISCVVEmitter.cpp
  SveEmitter.cpp
  TableGen.cpp
  )
1074  clang/utils/TableGen/RISCVVEmitter.cpp  (new file; diff suppressed because it is too large)
@@ -83,6 +83,10 @@ enum ActionType {
  GenArmCdeBuiltinSema,
  GenArmCdeBuiltinCG,
  GenArmCdeBuiltinAliases,
  GenRISCVVectorHeader,
  GenRISCVVectorGenericHeader,
  GenRISCVVectorBuiltins,
  GenRISCVVectorBuiltinCG,
  GenAttrDocs,
  GenDiagDocs,
  GenOptDocs,
@@ -228,6 +232,15 @@ cl::opt<ActionType> Action(
                   "Generate ARM CDE builtin code-generator for clang"),
        clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
                   "Generate list of valid ARM CDE builtin aliases for clang"),
        clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
                   "Generate riscv_vector.h for clang"),
        clEnumValN(GenRISCVVectorGenericHeader,
                   "gen-riscv-vector-generic-header",
                   "Generate riscv_vector_generic.h for clang"),
        clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
                   "Generate riscv_vector_builtins.inc for clang"),
        clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
                   "Generate riscv_vector_builtin_cg.inc for clang"),
        clEnumValN(GenAttrDocs, "gen-attr-docs",
                   "Generate attribute documentation"),
        clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -428,6 +441,18 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
  case GenArmCdeBuiltinAliases:
    EmitCdeBuiltinAliases(Records, OS);
    break;
  case GenRISCVVectorHeader:
    EmitRVVHeader(Records, OS);
    break;
  case GenRISCVVectorGenericHeader:
    EmitRVVGenericHeader(Records, OS);
    break;
  case GenRISCVVectorBuiltins:
    EmitRVVBuiltins(Records, OS);
    break;
  case GenRISCVVectorBuiltinCG:
    EmitRVVBuiltinCG(Records, OS);
    break;
  case GenAttrDocs:
    EmitClangAttrDocs(Records, OS);
    break;
@@ -106,6 +106,11 @@ void EmitMveBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);

void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);

void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -541,6 +541,22 @@ clang-tblgen Options

  Generate list of valid ARM CDE builtin aliases for Clang.

.. option:: -gen-riscv-vector-header

  Generate ``riscv_vector.h`` for Clang.

.. option:: -gen-riscv-vector-generic-header

  Generate ``riscv_vector_generic.h`` for Clang.

.. option:: -gen-riscv-vector-builtins

  Generate ``riscv_vector_builtins.inc`` for Clang.

.. option:: -gen-riscv-vector-builtin-codegen

  Generate ``riscv_vector_builtin_cg.inc`` for Clang.

.. option:: -gen-attr-docs

  Generate attribute documentation.
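For reference, these backends are driven through clang-tblgen in the same way as the existing ARM emitters. A minimal sketch of an invocation follows; the input path is hypothetical (it assumes the record definitions live in clang/include/clang/Basic/riscv_vector.td) and it relies only on the standard TableGen -I and -o options:

  clang-tblgen -gen-riscv-vector-header \
      -I clang/include/clang/Basic \
      clang/include/clang/Basic/riscv_vector.td \
      -o riscv_vector.h

Running the same input through -gen-riscv-vector-builtins or -gen-riscv-vector-builtin-codegen would emit the corresponding .inc files instead of the header.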