//===-- VOP3Instructions.td - Vector Instruction Definitions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

def BITOP3_32 : ComplexPattern;
def BITOP3_16 : ComplexPattern;

// Special case for v_div_fmas_{f32|f64}, since it seems to be the
// only VOP instruction that implicitly reads VCC.
let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod" in {
def VOP_F32_F32_F32_F32_VCC : VOPProfile<[f32, f32, f32, f32]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
  let HasExtVOP3DPP = 0;
  let HasExtDPP = 0;
  let IsSingle = 1;
}

def VOP_F64_F64_F64_F64_VCC : VOPProfile<[f64, f64, f64, f64]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
  let IsSingle = 1;
}
}

class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = "$vdst, $sdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod";
  let IsSingle = 1;
  let HasExtVOP3DPP = 0;
  let HasExtDPP = 0;
}

def DIV_FIXUP_F32_PROF : VOP3_Profile {
  let HasExtVOP3DPP = 0;
  let HasExtDPP = 0;
}

def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
  let HasClamp = 1;
  let IsSingle = 1;
  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = "$vdst, $sdst, $src0, $src1, $src2$clamp";
}

let HasExt64BitDPP = 1 in {
def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32>;
def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64>;

class V_MUL_PROF : VOP3_Profile
{ let HasExtVOP3DPP = 0; let HasExtDPP = 0; } def V_LSHL_ADD_U64_PROF : VOP3_Profile; def VOP_F64_F64_F64_F64_DPP_PROF : VOP3_Profile; def V_MAD_U32_PROF: VOP3_Profile { let HasExtVOP3DPP = 0; let HasExt64BitDPP = 1; } def VOP_I64_I64_I64_DPP : VOP3_Profile; def VOP_I32_I32_I64_DPP : VOP3_Profile> { let HasClamp = 1; } } // End HasExt64BitDPP = 1; //===----------------------------------------------------------------------===// // VOP3 INTERP //===----------------------------------------------------------------------===// class VOP3Interp pattern = []> : VOP3_Pseudo { let AsmMatchConverter = "cvtVOP3Interp"; let mayRaiseFPException = 0; } def VOP3_INTERP : VOPProfile<[f32, f32, i32, untyped]> { let Src0Mod = FPVRegInputMods; let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, InterpAttr:$attr, InterpAttrChan:$attrchan, Clamp0:$clamp, omod0:$omod); let Asm64 = "$vdst, $src0_modifiers, $attr$attrchan$clamp$omod"; } def VOP3_INTERP_MOV : VOPProfile<[f32, i32, i32, untyped]> { let Ins64 = (ins InterpSlot:$src0, InterpAttr:$attr, InterpAttrChan:$attrchan, Clamp0:$clamp, omod0:$omod); let Asm64 = "$vdst, $src0, $attr$attrchan$clamp$omod"; let HasClamp = 1; let HasSrc0Mods = 0; } class getInterp16Asm { string src2 = !if(HasSrc2, ", $src2_modifiers", ""); string omod = !if(HasOMod, "$omod", ""); string ret = " $vdst, $src0_modifiers, $attr$attrchan"#src2#"$high$clamp"#omod; } class getInterp16Ins { dag ret = !if(HasSrc2, !if(HasOMod, (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, InterpAttr:$attr, InterpAttrChan:$attrchan, Src2Mod:$src2_modifiers, VRegSrc_32:$src2, highmod:$high, Clamp0:$clamp, omod0:$omod), (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, InterpAttr:$attr, InterpAttrChan:$attrchan, Src2Mod:$src2_modifiers, VRegSrc_32:$src2, highmod:$high, Clamp0:$clamp) ), (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0, InterpAttr:$attr, InterpAttrChan:$attrchan, highmod:$high, Clamp0:$clamp, omod0:$omod) ); } class VOP3_INTERP16 ArgVT> : VOPProfile { let IsSingle = 1; let HasOMod = !ne(DstVT.Value, f16.Value); let HasHigh = 1; let Src0Mod = FPVRegInputMods; let Src2Mod = FPVRegInputMods; let Outs64 = (outs DstRC.RegClass:$vdst); let Ins64 = getInterp16Ins.ret; let Asm64 = getInterp16Asm.ret; } //===----------------------------------------------------------------------===// // VOP3 Instructions //===----------------------------------------------------------------------===// let isCommutable = 1 in { let isReMaterializable = 1 in { let mayRaiseFPException = 0 in { let SubtargetPredicate = HasMadMacF32Insts in { defm V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile>; defm V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile, any_fmad>; } // End SubtargetPredicate = HasMadMacInsts let SubtargetPredicate = HasFmaLegacy32 in defm V_FMA_LEGACY_F32 : VOP3Inst <"v_fma_legacy_f32", VOP3_Profile, int_amdgcn_fma_legacy>; } defm V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile>; defm V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile>; defm V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile, any_fma>, VOPD_Component<0x13, "v_fma_f32">; defm V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile, int_amdgcn_lerp>; let SchedRW = [WriteIntMul] in { let SubtargetPredicate = HasMadU32Inst in defm V_MAD_U32 : VOP3Inst <"v_mad_u32", V_MAD_U32_PROF>; let SubtargetPredicate = isGFX1250Plus in { defm V_MAD_NC_U64_U32 : VOP3Inst<"v_mad_nc_u64_u32", VOP_I32_I32_I64_DPP>; defm V_MAD_NC_I64_I32 : VOP3Inst<"v_mad_nc_i64_i32", VOP_I32_I32_I64_DPP>; } } let SchedRW = [WriteDoubleAdd] in { let FPDPRounding = 
1 in { defm V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP_F64_F64_F64_F64_DPP_PROF, any_fma>, VOPD_Component<0x20, "v_fma_f64">; let SubtargetPredicate = isNotGFX12Plus in { defm V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile, any_fadd>; defm V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile, any_fmul>; } // End SubtargetPredicate = isNotGFX12Plus } // End FPDPRounding = 1 let SubtargetPredicate = isNotGFX12Plus in { defm V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile, fminnum_like>; defm V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile, fmaxnum_like>; } // End SubtargetPredicate = isNotGFX12Plus } // End SchedRW = [WriteDoubleAdd] let SchedRW = [WriteIntMul] in { defm V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", V_MUL_PROF, DivergentBinFrag>; defm V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", V_MUL_PROF, mulhu>; defm V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", V_MUL_PROF>; defm V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", V_MUL_PROF, mulhs>; } // End SchedRW = [WriteIntMul] let SubtargetPredicate = HasIEEEMinimumMaximumInsts, ReadsModeReg = 0, AddedComplexity = 1 in { defm V_MINIMUM_F32 : VOP3Inst <"v_minimum_f32", VOP3_Profile, fminimum>; defm V_MAXIMUM_F32 : VOP3Inst <"v_maximum_f32", VOP3_Profile, fmaximum>; defm V_MINIMUM_F16 : VOP3Inst_t16 <"v_minimum_f16", VOP_F16_F16_F16, fminimum>; defm V_MAXIMUM_F16 : VOP3Inst_t16 <"v_maximum_f16", VOP_F16_F16_F16, fmaximum>; let SchedRW = [WriteDoubleAdd] in { defm V_MINIMUM_F64 : VOP3Inst <"v_minimum_f64", VOP3_Profile, fminimum>; defm V_MAXIMUM_F64 : VOP3Inst <"v_maximum_f64", VOP3_Profile, fmaximum>; } // End SchedRW = [WriteDoubleAdd] } // End SubtargetPredicate = HasIEEEMinimumMaximumInsts, ReadsModeReg = 0, AddedComplexity = 1 let SubtargetPredicate = isGFX1250Plus, SchedRW = [WriteDoubleAdd] in { defm V_MAX_I64 : VOP3Inst <"v_max_i64", VOP_I64_I64_I64_DPP, smax>; defm V_MAX_U64 : VOP3Inst <"v_max_u64", VOP_I64_I64_I64_DPP, umax>; defm V_MIN_I64 : VOP3Inst <"v_min_i64", VOP_I64_I64_I64_DPP, smin>; defm V_MIN_U64 : VOP3Inst <"v_min_u64", VOP_I64_I64_I64_DPP, umin>; } // End SubtargetPredicate = isGFX1250Plus, SchedRW = [WriteDoubleAdd] } // End isReMaterializable = 1 let Uses = [MODE, VCC, EXEC] in { // v_div_fmas_f32: // result = src0 * src1 + src2 // if (vcc) // result *= 2^32 // let SchedRW = [WriteFloatFMA] in defm V_DIV_FMAS_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_fmas_f32", VOP_F32_F32_F32_F32_VCC, []>; // v_div_fmas_f64: // result = src0 * src1 + src2 // if (vcc) // result *= 2^64 // let SchedRW = [WriteDouble], FPDPRounding = 1 in defm V_DIV_FMAS_F64 : VOP3Inst_Pseudo_Wrapper <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []>; } // End Uses = [MODE, VCC, EXEC] } // End isCommutable = 1 let isReMaterializable = 1 in { let mayRaiseFPException = 0 in { defm V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile, int_amdgcn_cubeid>; defm V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile, int_amdgcn_cubesc>; defm V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile, int_amdgcn_cubetc>; defm V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile, int_amdgcn_cubema>; } // End mayRaiseFPException defm V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile, AMDGPUbfe_u32>; defm V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile, AMDGPUbfe_i32>; defm V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile, AMDGPUbfi>; defm V_ALIGNBIT_B32 : VOP3Inst_t16_with_profiles <"v_alignbit_b32", VOP3_Profile, VOP3_Profile_True16, VOP3_Profile_Fake16, fshr, null_frag>; defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile, int_amdgcn_alignbyte>; // In gfx9 and 10, opsel is allowed for 
V_ALIGNBIT_B32 and V_ALIGNBYTE_B32. // Hardware uses opsel[1:0] to byte-select src2. Other opsel bits are ignored. defm V_ALIGNBIT_B32_opsel : VOP3Inst <"v_alignbit_b32_opsel", VOP3_Profile>; defm V_ALIGNBYTE_B32_opsel : VOP3Inst <"v_alignbyte_b32_opsel", VOP3_Profile>; let True16Predicate = UseRealTrue16Insts in defm V_ALIGNBYTE_B32_t16 : VOP3Inst <"v_alignbyte_b32_t16", VOP3_Profile_True16>; let True16Predicate = UseFakeTrue16Insts in defm V_ALIGNBYTE_B32_fake16 : VOP3Inst <"v_alignbyte_b32_fake16", VOP3_Profile_Fake16>; // XXX - No FPException seems suspect but manual doesn't say it does let mayRaiseFPException = 0 in { let isCommutable = 1 in { defm V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile, AMDGPUsmin3>; defm V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile, AMDGPUumin3>; defm V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile, AMDGPUsmax3>; defm V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile, AMDGPUumax3>; defm V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile, AMDGPUsmed3>; defm V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile, AMDGPUumed3>; } // End isCommutable = 1 defm V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile, AMDGPUfmin3>; defm V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile, AMDGPUfmax3>; defm V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile, AMDGPUfmed3>; } // End mayRaiseFPException = 0 let SubtargetPredicate = HasMinimum3Maximum3F32, ReadsModeReg = 0 in { defm V_MINIMUM3_F32 : VOP3Inst <"v_minimum3_f32", VOP3_Profile, AMDGPUfminimum3>; defm V_MAXIMUM3_F32 : VOP3Inst <"v_maximum3_f32", VOP3_Profile, AMDGPUfmaximum3>; } // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 let isCommutable = 1 in { defm V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile>; defm V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile>; defm V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile>; defm V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile>; } // End isCommutable = 1 defm V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile, int_amdgcn_cvt_pk_u8_f32>; defm V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", DIV_FIXUP_F32_PROF, AMDGPUdiv_fixup>; let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in { defm V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile, AMDGPUdiv_fixup>; defm V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile, any_fldexp>; } // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1 } // End isReMaterializable = 1 let SubtargetPredicate = isGFX9GFX10 in def : GCNPat < (i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), (V_ALIGNBYTE_B32_opsel_e64 i32:$src0_modifiers, VSrc_b32:$src0, i32:$src1_modifiers, VSrc_b32:$src1, i32:$src2_modifiers, VGPR_32:$src2) >; let True16Predicate = UseFakeTrue16Insts in def : GCNPat < (i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), (V_ALIGNBYTE_B32_fake16_e64 i32:$src0_modifiers, VSrc_b32:$src0, i32:$src1_modifiers, VSrc_b32:$src1, i32:$src2_modifiers, VGPR_32:$src2) >; let True16Predicate = UseRealTrue16Insts in def : GCNPat < (i32 (int_amdgcn_alignbyte (i32 (VOP3OpSelMods i32:$src0, i32:$src0_modifiers)), (i32 (VOP3OpSelMods i32:$src1, i32:$src1_modifiers)), (i32 (VOP3OpSelMods i32:$src2, i32:$src2_modifiers)))), (V_ALIGNBYTE_B32_t16_e64 i32:$src0_modifiers, VSrc_b32:$src0, i32:$src1_modifiers, VSrc_b32:$src1, i32:$src2_modifiers, (i16 
(EXTRACT_SUBREG VGPR_32:$src2, lo16))) >; let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does. let SchedRW = [WriteFloatFMA, WriteSALU] in defm V_DIV_SCALE_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32> ; // Double precision division pre-scale. let SchedRW = [WriteDouble, WriteSALU], FPDPRounding = 1 in defm V_DIV_SCALE_F64 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64>; } // End mayRaiseFPException = 0 let isReMaterializable = 1 in defm V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile>; let Constraints = "@earlyclobber $vdst" in { defm V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile>; } // End Constraints = "@earlyclobber $vdst" let isReMaterializable = 1 in { let SchedRW = [WriteDouble] in { defm V_TRIG_PREOP_F64 : VOP3Inst <"v_trig_preop_f64", VOP3_Profile, int_amdgcn_trig_preop>; } // End SchedRW = [WriteDouble] let SchedRW = [Write64Bit] in { let SubtargetPredicate = isGFX6GFX7 in { defm V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile, cshl_64>; defm V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile, csrl_64>; defm V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile, csra_64>; } // End SubtargetPredicate = isGFX6GFX7 let SubtargetPredicate = isGFX8Plus in { defm V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile, clshr_rev_64>; defm V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile, cashr_rev_64>; } // End SubtargetPredicate = isGFX8Plus let SubtargetPredicate = isGFX8GFX9GFX10GFX11 in { defm V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile, clshl_rev_64>; } // End SubtargetPredicate = isGFX8GFX9GFX10GFX11 } // End SchedRW = [Write64Bit] } // End isReMaterializable = 1 foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in let True16Predicate = p in def : GCNPat< (i32 (DivergentUnaryFrag i16:$src)), (i32 (V_BFE_I32_e64 i16:$src, (i32 0), (i32 0x10))) >; let True16Predicate = UseRealTrue16Insts in def : GCNPat< (i32 (DivergentUnaryFrag i16:$src)), (i32 (V_BFE_I32_e64 (REG_SEQUENCE VGPR_32, VGPR_16:$src, lo16, (i16 (IMPLICIT_DEF)), hi16), (i32 0), (i32 0x10))) >; let isReMaterializable = 1 in { let SubtargetPredicate = isGFX6GFX7GFX10Plus in { defm V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile>; } // End SubtargetPredicate = isGFX6GFX7GFX10Plus let SchedRW = [Write32Bit] in { let SubtargetPredicate = isGFX8Plus in { defm V_PERM_B32 : VOP3Inst <"v_perm_b32", VOP3_Profile, AMDGPUperm>; } // End SubtargetPredicate = isGFX8Plus } // End SchedRW = [Write32Bit] } // End isReMaterializable = 1 def VOPProfileMQSAD : VOP3_Profile { let HasModifiers = 0; } let SubtargetPredicate = isGFX7Plus in { let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in { defm V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile>; defm V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOPProfileMQSAD>; } // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] } // End SubtargetPredicate = isGFX7Plus let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU] in { let SubtargetPredicate = isGFX7Plus, OtherPredicates = [HasNotMADIntraFwdBug] in { defm V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>; defm V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>; } let SubtargetPredicate = isGFX11Only, OtherPredicates = [HasMADIntraFwdBug], Constraints = "@earlyclobber $vdst" in { defm V_MAD_U64_U32_gfx11 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>; defm V_MAD_I64_I32_gfx11 : VOP3Inst <"v_mad_i64_i32", 
VOP3b_I64_I1_I32_I32_I64>; } } // End isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU] let FPDPRounding = 1 in { let Predicates = [Has16BitInsts, isGFX8Only] in { defm V_DIV_FIXUP_F16 : VOP3Inst <"v_div_fixup_f16", VOP3_Profile, AMDGPUdiv_fixup>; let isCommutable = 1 in { defm V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile, any_fma>; } // End isCommutable = 1 } // End Predicates = [Has16BitInsts, isGFX8Only] let SubtargetPredicate = isGFX9Plus in { defm V_DIV_FIXUP_F16_gfx9 : VOP3Inst_t16 <"v_div_fixup_f16_gfx9", VOP_F16_F16_F16_F16, AMDGPUdiv_fixup>; defm V_FMA_F16_gfx9 : VOP3Inst_t16 <"v_fma_f16_gfx9", VOP_F16_F16_F16_F16, any_fma>; } // End SubtargetPredicate = isGFX9Plus } // End FPDPRounding = 1 let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in { defm V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile>; defm V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile>; let FPDPRounding = 1 in { defm V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile, any_fmad>; let Uses = [MODE, M0, EXEC] in { let OtherPredicates = [isNotGFX90APlus] in // For some reason the intrinsic operands are in a different order // from the instruction operands. def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>, [(set f16:$vdst, (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers), (VOP3Mods f32:$src0, i32:$src0_modifiers), (i32 timm:$attrchan), (i32 timm:$attr), (i1 timm:$high), M0))]>; } // End Uses = [M0, MODE, EXEC] } // End FPDPRounding = 1 let SubtargetPredicate = isGFX9Only, FPDPRounding = 1 in { defm V_MAD_F16_gfx9 : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile> ; } // End SubtargetPredicate = isGFX9Only, FPDPRounding = 1 let SubtargetPredicate = isGFX9Plus in { defm V_MAD_U16_gfx9 : VOP3Inst_t16 <"v_mad_u16_gfx9", VOP_I16_I16_I16_I16>; defm V_MAD_I16_gfx9 : VOP3Inst_t16 <"v_mad_i16_gfx9", VOP_I16_I16_I16_I16>; let OtherPredicates = [isNotGFX90APlus] in def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>; } // End SubtargetPredicate = isGFX9Plus // This predicate should only apply to the selection pattern. The // instruction still exists and should decode on subtargets with // other bank counts. 
let OtherPredicates = [isNotGFX90APlus, has32BankLDS], Uses = [MODE, M0, EXEC], FPDPRounding = 1 in { def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>, [(set f32:$vdst, (int_amdgcn_interp_p1_f16 (VOP3Mods f32:$src0, i32:$src0_modifiers), (i32 timm:$attrchan), (i32 timm:$attr), (i1 timm:$high), M0))]>; } // End OtherPredicates = [isNotGFX90APlus, has32BankLDS], Uses = [MODE, M0, EXEC], FPDPRounding = 1 let OtherPredicates = [isNotGFX90APlus], Uses = [MODE, M0, EXEC], FPDPRounding = 1 in { def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>>; } // End OtherPredicates = [isNotGFX90APlus], Uses = [MODE, M0, EXEC], FPDPRounding = 1 } // End SubtargetPredicate = Has16BitInsts, isCommutable = 1 foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in let True16Predicate = p in def : GCNPat< (i64 (DivergentUnaryFrag i16:$src)), (REG_SEQUENCE VReg_64, (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))), sub0, (i32 (COPY_TO_REGCLASS (V_ASHRREV_I32_e32 (S_MOV_B32 (i32 0x1f)), (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))) ), VGPR_32)), sub1) >; let True16Predicate = UseRealTrue16Insts in def : GCNPat< (i64 (DivergentUnaryFrag i16:$src)), (REG_SEQUENCE VReg_64, (i32 (V_BFE_I32_e64 (REG_SEQUENCE VGPR_32, VGPR_16:$src, lo16, (i16 (IMPLICIT_DEF)), hi16), (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))), sub0, (i32 (COPY_TO_REGCLASS (V_ASHRREV_I32_e32 (S_MOV_B32 (i32 0x1f)), (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))) ), VGPR_32)), sub1) >; let SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC], OtherPredicates = [isNotGFX90APlus] in { def V_INTERP_P1_F32_e64 : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>; def V_INTERP_P2_F32_e64 : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>; def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>; } // End SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC], OtherPredicates = [isNotGFX90APlus] // Note: 16-bit instructions produce a 0 result in the high 16-bits // on GFX8 and GFX9 and preserve high 16 bits on GFX10+ multiclass Arithmetic_i16_0Hi_TernaryPats { def : GCNPat< (i32 (zext (op i16:$src0, i16:$src1, i16:$src2))), (inst VSrc_b16:$src0, VSrc_b16:$src1, VSrc_b16:$src2) >; } let Predicates = [Has16BitInsts, isGFX8GFX9] in { defm : Arithmetic_i16_0Hi_TernaryPats; } let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in { // FIXME: Should be able to just pass imad to the instruction // definition pattern, but the implied clamp input interferes. 
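// The trailing (i1 0) result operand in the multiclass below appears to be
// the "implied clamp input" the FIXME above refers to: the selected pseudo
// expects an explicit clamp bit that the matched node does not carry, so a
// constant zero is supplied from the pattern.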
multiclass Ternary_i16_Pats { def : GCNPat < (op i16:$src0, i16:$src1, i16:$src2), (inst i16:$src0, i16:$src1, i16:$src2, (i1 0)) >; } defm: Ternary_i16_Pats; } // End Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] multiclass Ternary_i16_Pats_gfx9 { def : GCNPat < (op2 (op1 i16:$src0, i16:$src1), i16:$src2), (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE) >; } let True16Predicate = UseRealTrue16Insts in { defm: Ternary_i16_Pats_gfx9; } // End True16Predicates = UseRealTrue16Insts let True16Predicate = UseFakeTrue16Insts in { defm: Ternary_i16_Pats_gfx9; } // End True16Predicates = UseFakeTrue16Insts let OtherPredicates = [isGFX10Plus, Has16BitInsts], True16Predicate = NotHasTrue16BitInsts in { defm: Ternary_i16_Pats_gfx9; } // End OtherPredicates = [isGFX10Plus, Has16BitInsts], True16Predicate = NotHasTrue16BitInsts class ThreeOpFragSDAG : PatFrag< (ops node:$x, node:$y, node:$z), // When the inner operation is used multiple times, selecting 3-op // instructions may still be beneficial -- if the other users can be // combined similarly. Let's be conservative for now. (op2 (HasOneUseBinOp node:$x, node:$y), node:$z), [{ // Only use VALU ops when the result is divergent. if (!N->isDivergent()) return false; // Check constant bus limitations. // // Note: Use !isDivergent as a conservative proxy for whether the value // is in an SGPR (uniform values can end up in VGPRs as well). unsigned ConstantBusUses = 0; for (unsigned i = 0; i < 3; ++i) { if (!Operands[i]->isDivergent() && !isInlineImmediate(Operands[i].getNode())) { ConstantBusUses++; // This uses AMDGPU::V_ADD3_U32_e64, but all three operand instructions // have the same constant bus limit. if (ConstantBusUses > Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64)) return false; } } return true; }]> { let PredicateCodeUsesOperands = 1; } class ThreeOpFrag : ThreeOpFragSDAG { // The divergence predicate is irrelevant in GlobalISel, as we have // proper register bank checks. We just need to verify the constant // bus restriction when all the sources are considered. // // FIXME: With unlucky SGPR operands, we could penalize code by // blocking folding SGPR->VGPR copies later. 
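// As an illustration of the constant bus check: with a constant bus limit of
// 1, at most one of the three sources may be a non-divergent (likely SGPR)
// value or a non-inline literal, so a candidate like add(add(s0, v0), s1)
// with two uniform inputs is rejected, while add(add(s0, v0), v1) is not.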
// FIXME: There's no register bank verifier let GISelPredicateCode = [{ const int ConstantBusLimit = Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64); int ConstantBusUses = 0; for (unsigned i = 0; i < 3; ++i) { const RegisterBank *RegBank = RBI.getRegBank(Operands[i]->getReg(), MRI, TRI); if (RegBank->getID() == AMDGPU::SGPRRegBankID) { if (++ConstantBusUses > ConstantBusLimit) return false; } } return true; }]; } def shl_0_to_4 : PatFrag< (ops node:$src0, node:$src1), (shl node:$src0, node:$src1), [{ if (auto *C = dyn_cast(N->getOperand(1))) { return C->getZExtValue() <= 4; } return false; }]> { let GISelPredicateCode = [{ int64_t Imm = 0; if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Imm)) && !mi_match(MI.getOperand(2).getReg(), MRI, m_Copy(m_ICst(Imm)))) return false; return (uint64_t)Imm <= 4; }]; } class VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile { defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)), (ins VGPR_32:$vdst_in, op_sel0:$op_sel)); let InsVOP3OpSel = !con(getIns64.ret, Tail); let InsVOP3Base = !con(getInsVOP3Base.ret, Tail); let HasClamp = _HasClamp; let HasExtVOP3DPP = 1; } class VOP3_CVT_PK_F8_F32_Profile_fake16 : VOP3_Profile_Fake16 { defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)), (ins VGPR_32:$vdst_in, op_sel0:$op_sel)); let InsVOP3OpSel = !con(getIns64.ret, Tail); let InsVOP3Base = !con(getInsVOP3Base.ret, Tail); let HasClamp = _HasClamp; let HasExtVOP3DPP = 1; } // This t16 profile with vdst_in operand is for backward compatibility and is used // for user controlled packing class VOP3_CVT_PK_F8_F32_Profile_t16 : VOP3_Profile_True16 { defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)), (ins VGPR_16:$vdst_in, op_sel0:$op_sel)); let InsVOP3OpSel = !con(getIns64.ret, Tail); let InsVOP3Base = !con(getInsVOP3Base.ret, Tail); let HasClamp = _HasClamp; let HasExtVOP3DPP = 1; } def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, op_sel0:$op_sel); let InsVOP3DPP16 = (ins VGPR_32:$old, FP32InputMods:$src0_modifiers, Src0VOP3DPP:$src0, FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, op_sel0:$op_sel, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi); let InsVOP3DPP8 = (ins VGPR_32:$old, FP32InputMods:$src0_modifiers, Src0VOP3DPP:$src0, FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, op_sel0:$op_sel, dpp8:$dpp8, Dpp8FI:$fi); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 1; let HasExtVOP3DPP = 1; let HasOpSel = 1; let HasFP8DstByteSel = 1; let HasFP8ByteSel = 0; // It works as a dst-bytesel, but does not have byte_sel operand. 
let AsmVOP3Base = !subst(", $src2_modifiers", "", getAsmVOP3Base.ret); } class VOP3_CVT_SR_F8_ByteSel_Profile : VOP3_Profile> { let HasFP8DstByteSel = 1; let HasClamp = _HasClamp; } def IsPow2Plus1: PatLeaf<(i32 imm), [{ uint32_t V = N->getZExtValue(); return isPowerOf2_32(V - 1); }]>; def Log2_32: SDNodeXFormgetZExtValue(); return CurDAG->getTargetConstant(Log2_32(V - 1), SDLoc(N), MVT::i32); }]>; let SubtargetPredicate = isGFX9Plus in { let isCommutable = 1, isReMaterializable = 1 in { defm V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile>; defm V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile>; defm V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile>; defm V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile>; defm V_ADD_I32 : VOP3Inst <"v_add_i32", VOP3_Profile>; defm V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile>; } // End isCommutable = 1, isReMaterializable = 1 // TODO src0 contains the opsel bit for dst, so if we commute, need to mask and swap this // to the new src0. defm V_MED3_F16 : VOP3Inst_t16 <"v_med3_f16", VOP_F16_F16_F16_F16, AMDGPUfmed3>; defm V_MED3_I16 : VOP3Inst_t16 <"v_med3_i16", VOP_I16_I16_I16_I16, AMDGPUsmed3>; defm V_MED3_U16 : VOP3Inst_t16 <"v_med3_u16", VOP_I16_I16_I16_I16, AMDGPUumed3>; defm V_MIN3_F16 : VOP3Inst_t16 <"v_min3_f16", VOP_F16_F16_F16_F16, AMDGPUfmin3>; defm V_MIN3_I16 : VOP3Inst_t16 <"v_min3_i16", VOP_I16_I16_I16_I16, AMDGPUsmin3>; defm V_MIN3_U16 : VOP3Inst_t16 <"v_min3_u16", VOP_I16_I16_I16_I16, AMDGPUumin3>; defm V_MAX3_F16 : VOP3Inst_t16 <"v_max3_f16", VOP_F16_F16_F16_F16, AMDGPUfmax3>; defm V_MAX3_I16 : VOP3Inst_t16 <"v_max3_i16", VOP_I16_I16_I16_I16, AMDGPUsmax3>; defm V_MAX3_U16 : VOP3Inst_t16 <"v_max3_u16", VOP_I16_I16_I16_I16, AMDGPUumax3>; let SubtargetPredicate = HasMinimum3Maximum3F16, ReadsModeReg = 0 in { defm V_MINIMUM3_F16 : VOP3Inst_t16 <"v_minimum3_f16", VOP_F16_F16_F16_F16, AMDGPUfminimum3>; defm V_MAXIMUM3_F16 : VOP3Inst_t16 <"v_maximum3_f16", VOP_F16_F16_F16_F16, AMDGPUfmaximum3>; } // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 let SubtargetPredicate = HasAddMinMaxInsts, isCommutable = 1, isReMaterializable = 1 in { defm V_ADD_MAX_I32 : VOP3Inst <"v_add_max_i32", VOP_I32_I32_I32_I32>; defm V_ADD_MAX_U32 : VOP3Inst <"v_add_max_u32", VOP_I32_I32_I32_I32>; defm V_ADD_MIN_I32 : VOP3Inst <"v_add_min_i32", VOP_I32_I32_I32_I32>; defm V_ADD_MIN_U32 : VOP3Inst <"v_add_min_u32", VOP_I32_I32_I32_I32>; } defm V_ADD_I16 : VOP3Inst_t16 <"v_add_i16", VOP_I16_I16_I16>; defm V_SUB_I16 : VOP3Inst_t16 <"v_sub_i16", VOP_I16_I16_I16>; let isCommutable = 1 in { defm V_MAD_U32_U16 : VOP3Inst_t16 <"v_mad_u32_u16", VOP_I32_I16_I16_I32>; defm V_MAD_I32_I16 : VOP3Inst_t16 <"v_mad_i32_i16", VOP_I32_I16_I16_I32>; } // End isCommutable = 1 defm V_CVT_PKNORM_I16_F16 : VOP3Inst_t16 <"v_cvt_pknorm_i16_f16", VOP_B32_F16_F16>; defm V_CVT_PKNORM_U16_F16 : VOP3Inst_t16 <"v_cvt_pknorm_u16_f16", VOP_B32_F16_F16>; defm V_PACK_B32_F16 : VOP3Inst_t16 <"v_pack_b32_f16", VOP_B32_F16_F16>; let isReMaterializable = 1 in { defm V_SUB_I32 : VOP3Inst <"v_sub_i32", VOP3_Profile>; defm V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile>; defm V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile>; } // End isReMaterializable = 1 // V_LSHL_ADD_U64: D0.u64 = (S0.u64 << S1.u[2:0]) + S2.u64 // src0 is shifted left by 0-4 (use “0” to get ADD_U64). 
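// For example, with a shift amount of 3 this computes D0 = (S0 << 3) + S2;
// a shift amount of 0 reduces it to a plain 64-bit add.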
let SubtargetPredicate = HasLshlAddU64Inst in defm V_LSHL_ADD_U64 : VOP3Inst <"v_lshl_add_u64", V_LSHL_ADD_U64_PROF>; let OtherPredicates = [HasFP8ConversionInsts], mayRaiseFPException = 0, SchedRW = [WriteFloatCvt] in { let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in { let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in defm V_CVT_PK_FP8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32", VOP3_CVT_PK_F8_F32_Profile<>, VOP3_CVT_PK_F8_F32_Profile_t16<>, VOP3_CVT_PK_F8_F32_Profile_fake16<>>; let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32_gfx1250", VOP3_CVT_PK_F8_F32_Profile, VOP3_CVT_PK_F8_F32_Profile_t16, VOP3_CVT_PK_F8_F32_Profile_fake16>; defm V_CVT_PK_BF8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f32", VOP3_CVT_PK_F8_F32_Profile<>, VOP3_CVT_PK_F8_F32_Profile_t16<>, VOP3_CVT_PK_F8_F32_Profile_fake16<>>; let SubtargetPredicate = isGFX12Plus in { let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in defm V_CVT_SR_FP8_F32_gfx12 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile>; let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx1250", VOP3_CVT_SR_F8_ByteSel_Profile>; defm V_CVT_SR_BF8_F32_gfx12 : VOP3Inst<"v_cvt_sr_bf8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile>; } } // These instructions have non-standard use of op_sel. In particular they are // using op_sel bits 2 and 3 while only having two sources. Therefore dummy // src2 is used to hold the op_sel value. let Constraints = "$vdst = $src2", DisableEncoding = "$src2", SubtargetPredicate = isGFX940Plus in { defm V_CVT_SR_FP8_F32 : VOP3Inst<"v_cvt_sr_fp8_f32", VOP3_CVT_SR_F8_F32_Profile>; defm V_CVT_SR_BF8_F32 : VOP3Inst<"v_cvt_sr_bf8_f32", VOP3_CVT_SR_F8_F32_Profile>; } } class Cvt_PK_F8_F32_Pat : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, index)), (inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, $old, 0) >; class Cvt_PK_F8_F32_E5M3_Pat : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, index)), (inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, Clamp, $old, 0) >; multiclass Cvt_PK_F8_F32_t16_Pat { def : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, -1)), (REG_SEQUENCE VGPR_32, (i16 (EXTRACT_SUBREG $old, lo16)), lo16, (i16 (inst SRCMODS.DST_OP_SEL, $src0, 0, $src1, (i16 (EXTRACT_SUBREG $old, hi16)), 0)), hi16) >; def : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, 0)), (REG_SEQUENCE VGPR_32, (i16 (inst 0, $src0, 0, $src1, (i16 (EXTRACT_SUBREG $old, lo16)), 0)), lo16, (i16 (EXTRACT_SUBREG $old, hi16)), hi16) >; } multiclass Cvt_PK_F8_F32_E5M3_t16_Pat { def : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, -1)), (REG_SEQUENCE VGPR_32, (i16 (EXTRACT_SUBREG $old, lo16)), lo16, (i16 (inst SRCMODS.DST_OP_SEL, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, hi16)), 0)), hi16) >; def : GCNPat< (i32 (node f32:$src0, f32:$src1, i32:$old, 0)), (REG_SEQUENCE VGPR_32, (i16 (inst 0, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, lo16)), 0)), lo16, (i16 (EXTRACT_SUBREG $old, hi16)), hi16) >; } class Cvt_SR_F8_F32_Pat index, VOP3_Pseudo inst> : GCNPat< (i32 (node f32:$src0, i32:$src1, i32:$old, index)), (inst !if(index{1}, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, !if(index{0}, SRCMODS.OP_SEL_0, 0), $old, 0) >; class Cvt_SR_F8_ByteSel_Pat : GCNPat< (i32 (node (VOP3Mods SrcVT:$src0, i32:$src0_modifiers), (VOP3Mods i32:$src1, i32:$src1_modifiers), i32:$old, 
timm:$byte_sel)), (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $old, (as_i32timm $byte_sel)) >; class Cvt_SR_F8_ByteSel_E5M3_Pat : GCNPat< (i32 (node (VOP3Mods SrcVT:$src0, i32:$src0_modifiers), (VOP3Mods i32:$src1, i32:$src1_modifiers), i32:$old, timm:$byte_sel)), (inst $src0_modifiers, $src0, $src1_modifiers, $src1, Clamp, $old, (as_i32timm $byte_sel)) >; let OtherPredicates = [HasFP8ConversionInsts] in { foreach Index = [0, -1] in { let True16Predicate = NotHasTrue16BitInsts in { let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in def : Cvt_PK_F8_F32_Pat; def : Cvt_PK_F8_F32_Pat; } let True16Predicate = UseFakeTrue16Insts in { def : Cvt_PK_F8_F32_Pat; def : Cvt_PK_F8_F32_Pat; let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in { def : Cvt_PK_F8_F32_E5M3_Pat; def : Cvt_PK_F8_F32_E5M3_Pat; } } } let True16Predicate = UseRealTrue16Insts in { defm : Cvt_PK_F8_F32_t16_Pat; defm : Cvt_PK_F8_F32_t16_Pat; let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in { defm : Cvt_PK_F8_F32_E5M3_t16_Pat; defm : Cvt_PK_F8_F32_E5M3_t16_Pat; } } let SubtargetPredicate = isGFX940Plus in { foreach Index = [0, 1, 2, 3] in { def : Cvt_SR_F8_F32_Pat; def : Cvt_SR_F8_F32_Pat; } } let SubtargetPredicate = isGFX12Plus in { let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in def : Cvt_SR_F8_ByteSel_Pat; let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in { def : Cvt_SR_F8_ByteSel_E5M3_Pat; def : Cvt_SR_F8_ByteSel_E5M3_Pat; } def : Cvt_SR_F8_ByteSel_Pat; } } class ThreeOp_i32_Pats : GCNPat < // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions. (ThreeOpFrag i32:$src0, i32:$src1, i32:$src2), (inst VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2) >; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; let SubtargetPredicate = HasMadU32Inst, AddedComplexity = 10 in def : ThreeOp_i32_Pats; def : GCNPat< (DivergentBinFrag i32:$src0, IsPow2Plus1:$src1), (V_LSHL_ADD_U32_e64 i32:$src0, (i32 (Log2_32 imm:$src1)), i32:$src0)>; let SubtargetPredicate = HasLshlAddU64Inst in def : GCNPat< (ThreeOpFrag i64:$src0, i32:$src1, i64:$src2), (V_LSHL_ADD_U64_e64 VSrc_b64:$src0, VSrc_b32:$src1, VSrc_b64:$src2) >; let SubtargetPredicate = HasAddMinMaxInsts in { def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; def : ThreeOp_i32_Pats; } def : VOPBinOpClampPat; def : VOPBinOpClampPat; def : GCNPat<(DivergentBinFrag (or_oneuse i64:$src0, i64:$src1), i64:$src2), (REG_SEQUENCE VReg_64, (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)), (i32 (EXTRACT_SUBREG $src1, sub0)), (i32 (EXTRACT_SUBREG $src2, sub0))), sub0, (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)), (i32 (EXTRACT_SUBREG $src1, sub1)), (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>; } // End SubtargetPredicate = isGFX9Plus // FIXME: Probably should hardcode clamp bit in pseudo and avoid this. 
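// The OpSelBinOpClampPat class below forces DSTCLAMP.ENABLE in the selected
// VOP3 opsel encoding, which is how the clamped (saturating) 16-bit binary
// ops are matched; the FIXME above suggests hardcoding that clamp bit in the
// pseudo rather than passing it from each pattern.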
class OpSelBinOpClampPat : GCNPat< (node (i16 (VOP3OpSel i16:$src0, i32:$src0_modifiers)), (i16 (VOP3OpSel i16:$src1, i32:$src1_modifiers))), (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE, 0) >; let SubtargetPredicate = isGFX9Plus, True16Predicate = NotHasTrue16BitInsts in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; } // End SubtargetPredicate = isGFX9Plus, True16Predicate = NotHasTrue16BitInsts let True16Predicate = UseRealTrue16Insts in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; } // End True16Predicate = UseRealTrue16Insts let True16Predicate = UseFakeTrue16Insts in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; } // End True16Predicate = UseFakeTrue16Insts multiclass IMAD32_Pats { def : GCNPat < (ThreeOpFrag i32:$src0, i32:$src1, i32:$src2), (EXTRACT_SUBREG (inst i32:$src0, i32:$src1, (REG_SEQUENCE SReg_64, // Use scalar and let it be legalized $src2, sub0, (i32 (IMPLICIT_DEF)), sub1), 0 /* clamp */), sub0) >; // GISel-specific pattern that avoids creating a SGPR->VGPR copy if // $src2 is a VGPR. def : GCNPat < (ThreeOpFrag i32:$src0, i32:$src1, VGPR_32:$src2), (EXTRACT_SUBREG (inst i32:$src0, i32:$src1, (REG_SEQUENCE VReg_64, $src2, sub0, (i32 (IMPLICIT_DEF)), sub1), 0 /* clamp */), sub0) >; // Immediate src2 in the pattern above will not fold because it would be partially // undef. Hence define specialized pattern for this case. def : GCNPat < (ThreeOpFrag i32:$src0, i32:$src1, (i32 imm:$src2)), (EXTRACT_SUBREG (inst i32:$src0, i32:$src1, (i64 (as_i64imm $src2)), 0 /* clamp */), sub0) >; } // Handle cases where amdgpu-codegenprepare-mul24 made a mul24 instead of a normal mul. // We need to separate this because otherwise OtherPredicates would be overriden. class IMAD32_Mul24_Pat: GCNPat < (i64 (add (i64 (AMDGPUmul_u24 i32:$src0, i32:$src1)), i64:$src2)), (inst $src0, $src1, $src2, 0 /* clamp */) >; // exclude pre-GFX9 where it was slow let OtherPredicates = [HasNotMADIntraFwdBug], SubtargetPredicate = isGFX9Plus in { defm : IMAD32_Pats; def : IMAD32_Mul24_Pat; } let OtherPredicates = [HasMADIntraFwdBug], SubtargetPredicate = isGFX11Only in { defm : IMAD32_Pats; def : IMAD32_Mul24_Pat; } def VOP3_PERMLANE_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0, IntOpSelMods:$src1_modifiers, SSrc_b32:$src1, IntOpSelMods:$src2_modifiers, SSrc_b32:$src2, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasExtDPP = 0; } def VOP3_PERMLANE_VAR_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0, IntOpSelMods:$src1_modifiers, VRegSrc_32:$src1, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasExtDPP = 0; } class VOP3_PERMLANE_NOOPSEL_Profile : VOP3_Profile
{ let Ins64 = !con((ins VRegSrc_32:$src0, SSrc_b32:$src1), !if(P.HasSrc2, (ins SSrc_b32:$src2), (ins))); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasExtDPP = 0; } def opsel_i1timm : SDNodeXFormgetTargetConstant( N->getZExtValue() ? SISrcMods::OP_SEL_0 : SISrcMods::NONE, SDLoc(N), MVT::i32); }]>; def gi_opsel_i1timm : GICustomOperandRenderer<"renderOpSelTImm">, GISDNodeXFormEquiv; class SrcAndDstSelToOpSelXForm : SDNodeXFormgetZExtValue(); unsigned New = 0; if (}] # modifier_idx # [{ == 0) { New = (}] # dest_sel # [{ == 1) ? ((Val & 0x1) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL) : SISrcMods::DST_OP_SEL) : ((Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE); } else if (}] # modifier_idx # [{== 1) { New = (Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE; } if (}] # modifier_idx # [{== 2) { New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE; } return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32); }]>; def SrcAndDstSelToOpSelXForm_0_0 : SrcAndDstSelToOpSelXForm<0,0>; def SrcAndDstSelToOpSelXForm_0_1 : SrcAndDstSelToOpSelXForm<0,1>; def SrcAndDstSelToOpSelXForm_1_0 : SrcAndDstSelToOpSelXForm<1,0>; def SrcAndDstSelToOpSelXForm_1_1 : SrcAndDstSelToOpSelXForm<1,1>; def SrcAndDstSelToOpSelXForm_2_0 : SrcAndDstSelToOpSelXForm<2,0>; // The global isel renderer has no way to access the templatized args of (SrcAndDstSelToOpSelXForm) in // renderer C++ APIs. Therefore, combinations of modifier_idx & dest_sel are embedded in renderer name itself. // FixMe: Avoid combinations of modifier_idx & dest_sel for global isel cases. def gi_SrcAndDstSelToOpSelXForm_0_0 : GICustomOperandRenderer<"renderSrcAndDstSelToOpSelXForm_0_0">, GISDNodeXFormEquiv; def gi_SrcAndDstSelToOpSelXForm_0_1 : GICustomOperandRenderer<"renderSrcAndDstSelToOpSelXForm_0_1">, GISDNodeXFormEquiv; def gi_SrcAndDstSelToOpSelXForm_1_0 : GICustomOperandRenderer<"renderSrcAndDstSelToOpSelXForm_1_0">, GISDNodeXFormEquiv; def gi_SrcAndDstSelToOpSelXForm_1_1 : GICustomOperandRenderer<"renderSrcAndDstSelToOpSelXForm_1_1">, GISDNodeXFormEquiv; def gi_SrcAndDstSelToOpSelXForm_2_0 : GICustomOperandRenderer<"renderSrcAndDstSelToOpSelXForm_2_0">, GISDNodeXFormEquiv; def DstSelToOpSelXForm : SDNodeXFormgetTargetConstant( N->getZExtValue() ? SISrcMods::DST_OP_SEL : SISrcMods::NONE, SDLoc(N), MVT::i32); }]>; def gi_DstSelToOpSelXForm : GICustomOperandRenderer<"renderDstSelToOpSelXForm">, GISDNodeXFormEquiv; def SrcSelToOpSelXForm : SDNodeXFormgetTargetConstant( N->getZExtValue() ? SISrcMods::OP_SEL_0 : SISrcMods::NONE, SDLoc(N), MVT::i32); }]>; def gi_SrcSelToOpSelXForm : GICustomOperandRenderer<"renderSrcSelToOpSelXForm">, GISDNodeXFormEquiv; def DstSelToOpSel3XForm : SDNodeXFormgetZExtValue(); return CurDAG->getTargetConstant( (V & 0x2) ? 
SISrcMods::DST_OP_SEL : SISrcMods::NONE, SDLoc(N), MVT::i32); }]>; def gi_DstSelToOpSel3XForm : GICustomOperandRenderer<"renderDstSelToOpSel3XFormXForm">, GISDNodeXFormEquiv; class PermlanePat : GCNPat< (vt (permlane vt:$vdst_in, vt:$src0, i32:$src1, i32:$src2, timm:$fi, timm:$bc)), (inst (opsel_i1timm $fi), VGPR_32:$src0, (opsel_i1timm $bc), SCSrc_b32:$src1, 0, SCSrc_b32:$src2, VGPR_32:$vdst_in) >; class PermlaneVarPat : GCNPat< (permlane i32:$vdst_in, i32:$src0, i32:$src1, timm:$fi, timm:$bc), (inst (opsel_i1timm $fi), VGPR_32:$src0, (opsel_i1timm $bc), VGPR_32:$src1, VGPR_32:$vdst_in) >; class PermlaneNoDppPat3Src : GCNPat< (permlane i32:$src0, i32:$src1, i32:$src2), (inst VGPR_32:$src0, SCSrc_b32:$src1, SCSrc_b32:$src2) >; class PermlaneNoDppPat2Src : GCNPat< (permlane i32:$src0, i32:$src1), (inst VGPR_32:$src0, SCSrc_b32:$src1) >; class VOP3_BITOP3_Profile : VOP3_Profile { let HasClamp = 0; let HasOMod = 0; let HasModifiers = 0; let HasVOPD3Src2 = 0; let HasBitOp3 = 1; let InsVOPD3Y = (ins Src0VOPD3:$src0Y, Src1VOPD3:$vsrc1Y, bitop3_0:$bitop3); let AsmVOPD3Y = getAsmVOPDPart.ret # "$bitop3"; } class VOP3_CVT_SCALE_F1632_FP8BF8_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, op_sel0:$op_sel); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; } class VOP3_CVT_SCALE_F1632_FP8BF8_TiedInput_Profile : VOP3_Profile { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; } class VOP3_CVT_SCALE_FP4FP8BF8_F32_TiedInput_Profile : VOP3_Profile { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, FP32InputMods:$src2_modifiers, Src2RC64:$src2, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; } class VOP3_CVT_SCALE_FP4_F32_TiedInput_Profile : VOP3_CVT_SCALE_FP4FP8BF8_F32_TiedInput_Profile
{ let HasFP8DstByteSel = 1; let HasFP8ByteSel = 0; // It works as a dst-bytesel, but does not have byte_sel operand. } class VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile : VOP3_CVT_SCALE_FP4FP8BF8_F32_TiedInput_Profile
{ let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, Int32InputMods:$src1_modifiers, Src1RC64:$src1, FP32InputMods:$src2_modifiers, Src2RC64:$src2, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasFP8DstByteSel = 1; let HasFP8ByteSel = 0; // It works as a dst-bytesel, but does not have byte_sel operand. } class VOP3_CVT_SCALE_FP4_F16BF16_TiedInput_Profile : VOP3_Profile { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, FP32InputMods:$src2_modifiers, VGPR_32:$src2, op_sel0:$op_sel); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 1; let HasOpSel = 1; let Asm64 = !subst(", $src2_modifiers", "", AsmVOP3Base); let HasExtVOP3DPP = 0; let HasFP8DstByteSel = 1; let HasFP8ByteSel = 0; } class VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins PackedF16InputMods: $src0_modifiers, Src0RC64:$src0, Int32InputMods: $src1_modifiers, Src1RC64:$src1, FP32InputMods: $src2_modifiers, Src2RC64:$src2, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; let HasFP4DstByteSel = 1; } class VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile : VOP3_Profile { let Src0RC64 = !if(!gt(P.Src0VT.Size, 32), getVOP3VRegSrcForVT.ret, getVOP3SrcForVT.ret); let InsVOP3OpSel = (ins PackedVGPRF32InputMods: $src0_modifiers, Src0RC64:$src0, Int32InputMods: $src1_modifiers, Src1RC64:$src1, FP32InputMods: $src2_modifiers, Src2RC64:$src2, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; let HasFP4DstByteSel = 1; } class VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile : VOP3_Profile, VOP3_OPSEL> { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, op_sel0:$op_sel); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; } class VOP3_CVT_SCALE_PK_FP8BF8_F16BF16_TiedInput_Profile : VOP3_Profile { let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, FP32InputMods:$src1_modifiers, Src1RC64:$src1, VGPR_32:$vdst_in, op_sel0:$op_sel); let HasClamp = 0; let HasSrc2 = 0; let HasSrc2Mods = 0; let HasExtVOP3DPP = 0; let HasOpSel = 1; let HasOMod = 0; } class VOP3_CVT_SCALEF32_PK_F864_Profile : VOP3_Profile
{ let HasModifiers = 0; let HasSrc0IntMods = 0; let HasSrc1IntMods = 0; let HasSrc0FloatMods = 0; let HasSrc1FloatMods = 0; let HasSrc2FloatMods = 0; let HasOMod = 0; let HasOpSel = 0; let HasClamp = 0; let HasExtDPP = 0; let HasExt32BitDPP = 0; let HasExtVOP3DPP = 0; let HasExt64BitDPP = 0; // All convert opcodes operating on FP6/BF6/FP4 data must use VGPR sources for // any operand slots > 32 bit. let Src0RC64 = !if(!gt(P.Src0VT.Size, 32), getVOP3VRegSrcForVT.ret, getVOP3SrcForVT.ret); } let SubtargetPredicate = HasFP8ConversionScaleInsts, mayRaiseFPException = 0 in { let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in { defm V_CVT_SCALEF32_SR_FP8_BF16 : VOP3Inst<"v_cvt_scalef32_sr_fp8_bf16", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_FP8_F16 : VOP3Inst<"v_cvt_scalef32_sr_fp8_f16", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_FP8_F32 : VOP3Inst<"v_cvt_scalef32_sr_fp8_f32", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_F16_FP8 : VOP3Inst<"v_cvt_scalef32_f16_fp8", VOP3_CVT_SCALE_F1632_FP8BF8_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_FP8_F32 : VOP3Inst<"v_cvt_scalef32_pk_fp8_f32", VOP3_CVT_SCALE_FP4FP8BF8_F32_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_FP8_F16 : VOP3Inst<"v_cvt_scalef32_pk_fp8_f16", VOP3_CVT_SCALE_PK_FP8BF8_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_FP8_BF16 : VOP3Inst<"v_cvt_scalef32_pk_fp8_bf16", VOP3_CVT_SCALE_PK_FP8BF8_F16BF16_TiedInput_Profile>; } defm V_CVT_SCALEF32_F32_FP8 : VOP3Inst<"v_cvt_scalef32_f32_fp8", VOP3_CVT_SCALE_F1632_FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_F32_FP8 : VOP3Inst<"v_cvt_scalef32_pk_f32_fp8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_F16_FP8 : VOP3Inst<"v_cvt_scalef32_pk_f16_fp8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_BF16_FP8 : VOP3Inst<"v_cvt_scalef32_pk_bf16_fp8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; } let SubtargetPredicate = HasBF8ConversionScaleInsts, mayRaiseFPException = 0 in { let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in { defm V_CVT_SCALEF32_SR_BF8_BF16 : VOP3Inst<"v_cvt_scalef32_sr_bf8_bf16", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_BF8_F16 : VOP3Inst<"v_cvt_scalef32_sr_bf8_f16", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_BF8_F32 : VOP3Inst<"v_cvt_scalef32_sr_bf8_f32", VOP3_CVT_SCALE_SR_F8BF8_F16BF16F32_TiedInput_Profile>; defm V_CVT_SCALEF32_F16_BF8 : VOP3Inst<"v_cvt_scalef32_f16_bf8", VOP3_CVT_SCALE_F1632_FP8BF8_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_BF8_F32 : VOP3Inst<"v_cvt_scalef32_pk_bf8_f32", VOP3_CVT_SCALE_FP4FP8BF8_F32_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_BF8_F16 : VOP3Inst<"v_cvt_scalef32_pk_bf8_f16", VOP3_CVT_SCALE_PK_FP8BF8_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_BF8_BF16 : VOP3Inst<"v_cvt_scalef32_pk_bf8_bf16", VOP3_CVT_SCALE_PK_FP8BF8_F16BF16_TiedInput_Profile>; } defm V_CVT_SCALEF32_F32_BF8 : VOP3Inst<"v_cvt_scalef32_f32_bf8", VOP3_CVT_SCALE_F1632_FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_F32_BF8 : VOP3Inst<"v_cvt_scalef32_pk_f32_bf8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_F16_BF8 : VOP3Inst<"v_cvt_scalef32_pk_f16_bf8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_BF16_BF8 : VOP3Inst<"v_cvt_scalef32_pk_bf16_bf8", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; } let SubtargetPredicate = HasFP4ConversionScaleInsts, mayRaiseFPException = 0 in { defm 
V_CVT_SCALEF32_PK_F32_FP4 : VOP3Inst<"v_cvt_scalef32_pk_f32_fp4", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in { defm V_CVT_SCALEF32_PK_FP4_F32 : VOP3Inst<"v_cvt_scalef32_pk_fp4_f32", VOP3_CVT_SCALE_FP4_F32_TiedInput_Profile>; let Constraints = "@earlyclobber $vdst" in { defm V_CVT_SCALEF32_SR_PK_FP4_F16: VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_f16", VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_PK_FP4_BF16: VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_bf16", VOP3_CVT_SCALE_SR_PK_F4_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_SR_PK_FP4_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk_fp4_f32", VOP3_CVT_SCALE_SR_PK_F4_F32_TiedInput_Profile< VOP_I32_V2F32_I32_F32>>; } } defm V_CVT_SCALEF32_PK_F16_FP4 : VOP3Inst<"v_cvt_scalef32_pk_f16_fp4", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; defm V_CVT_SCALEF32_PK_BF16_FP4 : VOP3Inst<"v_cvt_scalef32_pk_bf16_fp4", VOP3_CVT_SCALE_PK_F16BF16F32_FP4FP8BF8_Profile>; // These instructions have non-standard use of op_sel. In particular they are // using op_sel bits 2 and 3 while only having two sources. let Constraints = "$vdst = $src2", DisableEncoding = "$src2" in { defm V_CVT_SCALEF32_PK_FP4_F16 : VOP3Inst<"v_cvt_scalef32_pk_fp4_f16", VOP3_CVT_SCALE_FP4_F16BF16_TiedInput_Profile>; defm V_CVT_SCALEF32_PK_FP4_BF16 : VOP3Inst<"v_cvt_scalef32_pk_fp4_bf16", VOP3_CVT_SCALE_FP4_F16BF16_TiedInput_Profile>; } } let SubtargetPredicate = HasFP6BF6ConversionScaleInsts, mayRaiseFPException = 0, Constraints = "@earlyclobber $vdst" in { defm V_CVT_SCALEF32_PK32_F32_FP6 : VOP3Inst<"v_cvt_scalef32_pk32_f32_fp6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_f32_fp6>; defm V_CVT_SCALEF32_PK32_F32_BF6 : VOP3Inst<"v_cvt_scalef32_pk32_f32_bf6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_f32_bf6>; defm V_CVT_SCALEF32_PK32_F16_FP6 : VOP3Inst<"v_cvt_scalef32_pk32_f16_fp6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_f16_fp6>; defm V_CVT_SCALEF32_PK32_BF16_FP6 : VOP3Inst<"v_cvt_scalef32_pk32_bf16_fp6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_bf16_fp6>; defm V_CVT_SCALEF32_PK32_F16_BF6 : VOP3Inst<"v_cvt_scalef32_pk32_f16_bf6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_f16_bf6>; defm V_CVT_SCALEF32_PK32_BF16_BF6 : VOP3Inst<"v_cvt_scalef32_pk32_bf16_bf6", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_bf16_bf6>; } let SubtargetPredicate = HasF16BF16ToFP6BF6ConversionScaleInsts, mayRaiseFPException = 0, Constraints = "@earlyclobber $vdst" in { defm V_CVT_SCALEF32_PK32_FP6_F16 : VOP3Inst<"v_cvt_scalef32_pk32_fp6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_fp6_f16>; defm V_CVT_SCALEF32_PK32_BF6_F16 : VOP3Inst<"v_cvt_scalef32_pk32_bf6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_bf6_f16>; defm V_CVT_SCALEF32_PK32_FP6_BF16 : VOP3Inst<"v_cvt_scalef32_pk32_fp6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_fp6_bf16>; defm V_CVT_SCALEF32_PK32_BF6_BF16 : VOP3Inst<"v_cvt_scalef32_pk32_bf6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk32_bf6_bf16>; defm V_CVT_SCALEF32_SR_PK32_BF6_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk32_bf6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_bf6_bf16>; defm V_CVT_SCALEF32_SR_PK32_BF6_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk32_bf6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_bf6_f16>; defm V_CVT_SCALEF32_SR_PK32_BF6_F32 
: VOP3Inst<"v_cvt_scalef32_sr_pk32_bf6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_bf6_f32>; defm V_CVT_SCALEF32_SR_PK32_FP6_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk32_fp6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_fp6_bf16>; defm V_CVT_SCALEF32_SR_PK32_FP6_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk32_fp6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_fp6_f16>; defm V_CVT_SCALEF32_SR_PK32_FP6_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk32_fp6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk32_fp6_f32>; } let SubtargetPredicate = HasGFX950Insts, mayRaiseFPException = 0 in { defm V_CVT_SCALEF32_2XPK16_FP6_F32 : VOP3Inst<"v_cvt_scalef32_2xpk16_fp6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_2xpk16_fp6_f32>; defm V_CVT_SCALEF32_2XPK16_BF6_F32 : VOP3Inst<"v_cvt_scalef32_2xpk16_bf6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_2xpk16_bf6_f32>; } let SubtargetPredicate = HasCvtPkF16F32Inst in { let ReadsModeReg = 0 in { defm V_CVT_PK_F16_F32 : VOP3Inst<"v_cvt_pk_f16_f32", VOP3_Profile>; } def : GCNPat<(v2f16 (fpround v2f32:$src)), (V_CVT_PK_F16_F32_e64 0, (EXTRACT_SUBREG VReg_64:$src, sub0), 0, (EXTRACT_SUBREG VReg_64:$src, sub1))>; def : GCNPat<(v2f16 (fpround v2f64:$src)), (V_CVT_PK_F16_F32_e64 0, (V_CVT_F32_F64_e64 0, (EXTRACT_SUBREG VReg_128:$src, sub0_sub1)), 0, (V_CVT_F32_F64_e64 0, (EXTRACT_SUBREG VReg_128:$src, sub2_sub3)))>; def : GCNPat<(v2f16 (build_vector (f16 (fpround (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))), (f16 (fpround (f32 (VOP3Mods f32:$src1, i32:$src1_modifiers)))))), (V_CVT_PK_F16_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1)>; } class Cvt_Scale_FP4FP8BF8ToF16F32_Pat : GCNPat< (DstTy (node i32:$src0, f32:$src1, timm:$index)), (inst (SrcAndDstSelToOpSelXForm_0_0 $index), $src0, (SrcAndDstSelToOpSelXForm_1_0 $index), $src1) >; def : Cvt_Scale_FP4FP8BF8ToF16F32_Pat; def : Cvt_Scale_FP4FP8BF8ToF16F32_Pat; def : Cvt_Scale_FP4FP8BF8ToF16F32_Pat; def : Cvt_Scale_FP4FP8BF8ToF16F32_Pat; class Cvt_Scale_FP8BF8ToF16_Pat : GCNPat< (v2f16 (node v2f16:$vdst_in, i32:$src0, f32:$src1, timm:$src_sel, dst_sel)), (inst !if(!eq(dst_sel, 0), (SrcAndDstSelToOpSelXForm_0_0 $src_sel), (SrcAndDstSelToOpSelXForm_0_1 $src_sel)), $src0, !if(!eq(dst_sel, 0), (SrcAndDstSelToOpSelXForm_1_0 $src_sel), (SrcAndDstSelToOpSelXForm_1_1 $src_sel)), $src1, VGPR_32:$vdst_in) >; foreach DstSel = [0, -1] in { def : Cvt_Scale_FP8BF8ToF16_Pat; def : Cvt_Scale_FP8BF8ToF16_Pat; } class Cvt_Scale_PK_F32ToFP8BF8_Pat : GCNPat< (v2i16 (node v2i16:$vdst_in, f32:$src0, f32:$src1, f32:$src2, timm:$word_sel)), (inst (DstSelToOpSelXForm $word_sel), $src0, 0, $src1, 0, $src2, VGPR_32:$vdst_in) >; def : Cvt_Scale_PK_F32ToFP8BF8_Pat; def : Cvt_Scale_PK_F32ToFP8BF8_Pat; class Cvt_Scale_PK_FP8BF8ToF16F32_Pat : GCNPat< (DstTy (node i32:$src0, f32:$src1, timm:$word_sel)), (inst (SrcSelToOpSelXForm $word_sel), $src0, 0, $src1) >; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_FP8BF8ToF16F32_Pat; class Cvt_Scale_PK_F16BF16ToFP8BF8_Pat : GCNPat< (v2i16 (node v2i16:$vdst_in, SrcTy:$src0, f32:$src1, timm:$word_sel)), (inst (DstSelToOpSelXForm $word_sel), $src0, 0, $src1, VGPR_32:$vdst_in) >; def : Cvt_Scale_PK_F16BF16ToFP8BF8_Pat; def : Cvt_Scale_PK_F16BF16ToFP8BF8_Pat; def : Cvt_Scale_PK_F16BF16ToFP8BF8_Pat; def : 
Cvt_Scale_PK_F16BF16ToFP8BF8_Pat; class Cvt_Scale_PK_F32ToFP4_Pat : GCNPat< (i32 (node i32:$vdst_in, f32:$src0, f32:$src1, f32:$src2, timm:$index)), (inst (DstSelToOpSel3XForm $index), $src0, 0, $src1, (SrcAndDstSelToOpSelXForm_2_0 $index), $src2, VGPR_32:$vdst_in) >; def : Cvt_Scale_FP4FP8BF8ToF16F32_Pat; def : Cvt_Scale_PK_F32ToFP4_Pat; class Cvt_Scale_PK_F16ToFP4_Pat : GCNPat< (i32 (node i32:$src2, SrcTy:$src0, f32:$src1, timm:$index)), (inst (DstSelToOpSel3XForm $index), $src0, 0, $src1, (SrcAndDstSelToOpSelXForm_2_0 $index), $src2) >; def : Cvt_Scale_PK_F16ToFP4_Pat; def : Cvt_Scale_PK_F16ToFP4_Pat; class Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat : GCNPat< (i32 (node i32:$vdst_in, SrcTy:$src0, i32:$src1, f32:$src2, timm:$index)), (inst (DstSelToOpSel3XForm $index), $src0, 0, $src1, (SrcAndDstSelToOpSelXForm_2_0 $index), $src2, VGPR_32:$vdst_in) >; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; def : Cvt_Scale_SR_PK_BF16F16F32ToFP4BF8FP8_Pat; let SubtargetPredicate = isGFX10Plus in { let isCommutable = 1, isReMaterializable = 1 in { defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile>; } // End isCommutable = 1, isReMaterializable = 1 def : ThreeOp_i32_Pats; let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in", isConvergent = 1 in { defm V_PERMLANE16_B32 : VOP3Inst<"v_permlane16_b32", VOP3_PERMLANE_Profile>; defm V_PERMLANEX16_B32 : VOP3Inst<"v_permlanex16_b32", VOP3_PERMLANE_Profile>; } // End $vdst = $vdst_in, DisableEncoding $vdst_in, isConvergent = 1 foreach vt = Reg32Types.types in { def : PermlanePat; def : PermlanePat; } let isCommutable = 1 in { defm V_ADD_NC_U16 : VOP3Inst_t16 <"v_add_nc_u16", VOP_I16_I16_I16, add>; } // End isCommutable = 1 defm V_SUB_NC_U16 : VOP3Inst_t16 <"v_sub_nc_u16", VOP_I16_I16_I16, sub>; } // End SubtargetPredicate = isGFX10Plus let True16Predicate = NotHasTrue16BitInsts, SubtargetPredicate = isGFX10Plus in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; // Undo sub x, c -> add x, -c canonicalization since c is more likely // an inline immediate than -c. 
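// For instance, (sub x, 64) becomes (add x, -64) after canonicalization;
// 64 is an inline immediate while -64 is not, so the patterns below select
// v_sub_nc_u16 x, 64 instead and avoid a literal operand.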
def : GCNPat< (add i16:$src0, (i16 NegSubInlineIntConst16:$src1)), (V_SUB_NC_U16_e64 0, VSrc_b16:$src0, 0, NegSubInlineIntConst16:$src1, 0, 0) >; } // End True16Predicate = NotHasTrue16BitInsts, SubtargetPredicate = isGFX10Plus let True16Predicate = UseRealTrue16Insts in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; def : GCNPat< (add i16:$src0, (i16 NegSubInlineIntConst16:$src1)), (V_SUB_NC_U16_t16_e64 0, VSrc_b16:$src0, 0, NegSubInlineIntConst16:$src1, 0, 0) >; } // End True16Predicate = UseRealTrue16Insts let True16Predicate = UseFakeTrue16Insts in { def : OpSelBinOpClampPat; def : OpSelBinOpClampPat; def : GCNPat< (add i16:$src0, (i16 NegSubInlineIntConst16:$src1)), (V_SUB_NC_U16_fake16_e64 0, VSrc_b16:$src0, 0, NegSubInlineIntConst16:$src1, 0, 0) >; } // End True16Predicate = UseFakeTrue16Insts let SubtargetPredicate = isGFX12Plus in { let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in { defm V_PERMLANE16_VAR_B32 : VOP3Inst<"v_permlane16_var_b32", VOP3_PERMLANE_VAR_Profile>; defm V_PERMLANEX16_VAR_B32 : VOP3Inst<"v_permlanex16_var_b32", VOP3_PERMLANE_VAR_Profile>; } // End $vdst = $vdst_in, DisableEncoding $vdst_in def : PermlaneVarPat; def : PermlaneVarPat; } // End SubtargetPredicate = isGFX12Plus let SubtargetPredicate = isGFX1250Plus, WaveSizePredicate = isWave32 in { defm V_PERMLANE_BCAST_B32 : VOP3Inst<"v_permlane_bcast_b32", VOP3_PERMLANE_NOOPSEL_Profile>; defm V_PERMLANE_UP_B32 : VOP3Inst<"v_permlane_up_b32", VOP3_PERMLANE_NOOPSEL_Profile>; defm V_PERMLANE_DOWN_B32 : VOP3Inst<"v_permlane_down_b32", VOP3_PERMLANE_NOOPSEL_Profile>; defm V_PERMLANE_XOR_B32 : VOP3Inst<"v_permlane_xor_b32", VOP3_PERMLANE_NOOPSEL_Profile>; defm V_PERMLANE_IDX_GEN_B32 : VOP3Inst<"v_permlane_idx_gen_b32", VOP3_PERMLANE_NOOPSEL_Profile>; def : PermlaneNoDppPat3Src; def : PermlaneNoDppPat3Src; def : PermlaneNoDppPat3Src; def : PermlaneNoDppPat3Src; def : PermlaneNoDppPat2Src; } // End SubtargetPredicate = isGFX1250Plus, WaveSizePredicate = isWave32 let HasClamp = 0, HasModifiers = 1 in { def BitOp3_B16_Profile : VOP3_BITOP3_Profile, VOP3_OPSEL>; def BitOp3_B16_t16_Profile : VOP3_Profile_True16; def BitOp3_B16_fake16_Profile : VOP3_Profile_Fake16; } let OtherPredicates = [HasBitOp3Insts] in { let isReMaterializable = 1 in { let SubtargetPredicate = isGFX940Plus in defm V_BITOP3_B16 : VOP3Inst <"v_bitop3_b16", BitOp3_B16_Profile>; let SubtargetPredicate = isGFX1250Plus in defm V_BITOP3_B16_gfx1250 : VOP3Inst_t16_with_profiles <"v_bitop3_b16_gfx1250", BitOp3_B16_Profile, BitOp3_B16_t16_Profile, BitOp3_B16_fake16_Profile>; defm V_BITOP3_B32 : VOP3Inst <"v_bitop3_b32", VOP3_BITOP3_Profile, VOP3_REGULAR>>, VOPD_Component<0x12, "v_bitop2_b32">; } def : GCNPat< (i32 (int_amdgcn_bitop3 i32:$src0, i32:$src1, i32:$src2, i32:$bitop3)), (i32 (V_BITOP3_B32_e64 VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2, timm:$bitop3)) >; def : GCNPat< (i32 (BITOP3_32 i32:$src0, i32:$src1, i32:$src2, i32:$bitop3)), (i32 (V_BITOP3_B32_e64 VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2, timm:$bitop3)) >; let SubtargetPredicate = isGFX940Plus in { def : GCNPat< (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0)) >; def : GCNPat< (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0)) >; } // End SubtargetPredicate = isGFX940Plus let SubtargetPredicate = isGFX1250Plus in { let 
True16Predicate = UseFakeTrue16Insts in { def : GCNPat< (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_gfx1250_fake16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0)) >; def : GCNPat< (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_gfx1250_fake16_e64 0, VSrc_b16:$src0, 0, VSrc_b16:$src1, 0, VSrc_b16:$src2, timm:$bitop3, 0)) >; } let True16Predicate = UseRealTrue16Insts in { def : GCNPat< (i16 (int_amdgcn_bitop3 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_gfx1250_t16_e64 0, VSrcT_b16:$src0, 0, VSrcT_b16:$src1, 0, VSrcT_b16:$src2, timm:$bitop3, 0)) >; def : GCNPat< (i16 (BITOP3_16 i16:$src0, i16:$src1, i16:$src2, i32:$bitop3)), (i16 (V_BITOP3_B16_gfx1250_t16_e64 0, VSrcT_b16:$src0, 0, VSrcT_b16:$src1, 0, VSrcT_b16:$src2, timm:$bitop3, 0)) >; } } // End SubtargetPredicate = isGFX1250Plus } // End OtherPredicates = [HasBitOp3Insts] class DivFmasPat : GCNPat< (AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)), (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)), (vt (VOP3Mods vt:$src2, i32:$src2_modifiers)), (i1 CondReg)), (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2) >; let WaveSizePredicate = isWave64 in { def : DivFmasPat; def : DivFmasPat; } let WaveSizePredicate = isWave32 in { def : DivFmasPat; def : DivFmasPat; } class VOP3_DOT_Profile : VOP3_Profile { let HasClamp = 0; let HasOMod = 0; } class VOP3_DOT_Profile_t16 : VOP3_Profile_True16 { let HasClamp = 0; let HasOMod = 0; // Override modifiers for bf16(i16) (same as float modifiers). let HasSrc0Mods = 1; let HasSrc1Mods = 1; let HasSrc2Mods = 1; let Src0ModVOP3DPP = FPVRegInputMods; let Src1ModVOP3DPP = FP32VCSrcInputMods; let Src2ModVOP3DPP = FPT16VCSrcInputMods; } class VOP3_DOT_Profile_fake16 : VOP3_Profile_Fake16 { let HasClamp = 0; let HasOMod = 0; // Override modifiers for bf16(i16) (same as float modifiers). let HasSrc0Mods = 1; let HasSrc1Mods = 1; let HasSrc2Mods = 1; let AsmVOP3Base = getAsmVOP3Base.ret; } let SubtargetPredicate = isGFX11Plus in { defm V_MAXMIN_F32 : VOP3Inst<"v_maxmin_f32", VOP3_Profile>; defm V_MINMAX_F32 : VOP3Inst<"v_minmax_f32", VOP3_Profile>; defm V_MAXMIN_F16 : VOP3Inst_t16<"v_maxmin_f16", VOP_F16_F16_F16_F16>; defm V_MINMAX_F16 : VOP3Inst_t16<"v_minmax_f16", VOP_F16_F16_F16_F16>; defm V_MAXMIN_U32 : VOP3Inst<"v_maxmin_u32", VOP3_Profile>; defm V_MINMAX_U32 : VOP3Inst<"v_minmax_u32", VOP3_Profile>; defm V_MAXMIN_I32 : VOP3Inst<"v_maxmin_i32", VOP3_Profile>; defm V_MINMAX_I32 : VOP3Inst<"v_minmax_i32", VOP3_Profile>; defm V_CVT_PK_I16_F32 : VOP3Inst<"v_cvt_pk_i16_f32", VOP3_Profile>; defm V_CVT_PK_U16_F32 : VOP3Inst<"v_cvt_pk_u16_f32", VOP3_Profile>; } // End SubtargetPredicate = isGFX11Plus class VOP3_CVT_SR_FP16_TiedInput_Profile : VOP3_CVT_SCALE_F1632_FP8BF8_TiedInput_Profile

{ let InsVOP3OpSel = (ins FP32InputMods:$src0_modifiers, Src0RC64:$src0, Int32InputMods:$src1_modifiers, Src1RC64:$src1, VGPR_32:$vdst_in, op_sel0:$op_sel); } // FIXME: GlobalISel cannot distinguish f16 and bf16 and may start using bf16 patterns // instead of less complex f16. Disable GlobalISel for these for now. def bf16_fpround : PatFrag <(ops node:$src0), (fpround $src0), [{ return true; }]> { let GISelPredicateCode = [{return false;}]; } let SubtargetPredicate = HasBF16ConversionInsts in { let ReadsModeReg = 0 in { defm V_CVT_PK_BF16_F32 : VOP3Inst<"v_cvt_pk_bf16_f32", VOP3_Profile>; defm V_CVT_SR_PK_BF16_F32 : VOP3Inst<"v_cvt_sr_pk_bf16_f32", VOP3_Profile, int_amdgcn_cvt_sr_pk_bf16_f32>; } def : GCNPat<(v2bf16 (bf16_fpround v2f32:$src)), (V_CVT_PK_BF16_F32_e64 0, (EXTRACT_SUBREG VReg_64:$src, sub0), 0, (EXTRACT_SUBREG VReg_64:$src, sub1))>; def : GCNPat<(v2bf16 (build_vector (bf16 (bf16_fpround (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))), (bf16 (bf16_fpround (f32 (VOP3Mods f32:$src1, i32:$src1_modifiers)))))), (V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, $src1_modifiers, $src1)>; def : GCNPat<(bf16 (bf16_fpround (f32 (VOP3Mods f32:$src0, i32:$src0_modifiers)))), (V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, 0, (f32 (IMPLICIT_DEF)))>; } class VOP3_CVT_SCALE_PK_F16_F864_Profile : VOP3_CVT_SCALEF32_PK_F864_Profile

{ let Src0RC64 = getVOP3VRegSrcForVT.ret; let Ins64 = !con(getIns64.ret, (ins ScaleSel:$scale_sel)); let Asm64 = getAsmVOP3Base.ret # "$scale_sel"; } multiclass VOP3CvtScaleSelInst { def _e64 : VOP3InstBase> { let Pattern = [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0)), i32:$src1, i32:$scale_sel))]; } } let HasExtVOP3DPP = 0, HasModifiers = 0 in { def VOP3_V2I32_I32_I32_V2I32 : VOP3_Profile>; def VOP3_V3I32_I32_I64_V2I32 : VOP3_Profile>; def VOP3_V4I32_I64_I64_V2I32 : VOP3_Profile>; } let Src0RC64 = VSrc_NoInline_v2f16 in { def VOP3_CVT_PK_F8_F16_Profile : VOP3_Profile; def VOP3_CVT_PK_F8_F16_True16_Profile : VOP3_Profile_True16; def VOP3_CVT_PK_F8_F16_Fake16_Profile : VOP3_Profile_Fake16; } let ReadsModeReg = 0, IsPacked = 0, SubtargetPredicate = isGFX125xOnly in { defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f16_gfx1250", VOP3_CVT_PK_F8_F16_Profile, VOP3_CVT_PK_F8_F16_True16_Profile, VOP3_CVT_PK_F8_F16_Fake16_Profile, int_amdgcn_cvt_pk_fp8_f16>; defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f16_gfx1250", VOP3_CVT_PK_F8_F16_Profile, VOP3_CVT_PK_F8_F16_True16_Profile, VOP3_CVT_PK_F8_F16_Fake16_Profile, int_amdgcn_cvt_pk_bf8_f16>; } let HasClamp = 0, HasOpSel = 1 in { def VOP3_CVT_SR_F8_F16_Profile : VOP3_CVT_SR_F8_ByteSel_Profile; def VOP3_CVT_SR_F8_F16_True16_Profile : VOP3_Profile_True16; def VOP3_CVT_SR_F8_F16_Fake16_Profile : VOP3_Profile_Fake16; } let SubtargetPredicate = isGFX1250Plus in { let ReadsModeReg = 0 in { defm V_CVT_SR_PK_F16_F32 : VOP3Inst<"v_cvt_sr_pk_f16_f32", VOP3_Profile, int_amdgcn_cvt_sr_pk_f16_f32>; // These instructions have non-standard use of op_sel. They are using bits 2 and 3 of opsel // to select a byte in the vdst. Bits 0 and 1 are unused. let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in { defm V_CVT_SR_FP8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_fp8_f16", VOP3_CVT_SR_F8_F16_Profile, VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>; defm V_CVT_SR_BF8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_bf8_f16", VOP3_CVT_SR_F8_F16_Profile, VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>; } let Constraints = "@earlyclobber $vdst" in { defm V_CVT_SCALE_PK8_F16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp8>; defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp8>; defm V_CVT_SCALE_PK8_F16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_bf8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_bf8>; defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_bf8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_bf8>; defm V_CVT_SCALE_PK8_F32_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp8>; defm V_CVT_SCALE_PK8_F32_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_bf8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_bf8>; defm V_CVT_SCALE_PK16_F16_FP6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_f16_fp6", VOP_V16F16_V3I32_I32, int_amdgcn_cvt_scale_pk16_f16_fp6>; defm V_CVT_SCALE_PK16_BF16_FP6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_bf16_fp6", VOP_V16BF16_V3I32_I32, int_amdgcn_cvt_scale_pk16_bf16_fp6>; defm V_CVT_SCALE_PK16_F16_BF6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_f16_bf6", VOP_V16F16_V3I32_I32, int_amdgcn_cvt_scale_pk16_f16_bf6>; defm V_CVT_SCALE_PK16_BF16_BF6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_bf16_bf6", 
VOP_V16BF16_V3I32_I32, int_amdgcn_cvt_scale_pk16_bf16_bf6>; defm V_CVT_SCALE_PK16_F32_FP6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_f32_fp6", VOP_V16F32_V3I32_I32, int_amdgcn_cvt_scale_pk16_f32_fp6>; defm V_CVT_SCALE_PK16_F32_BF6 : VOP3CvtScaleSelInst<"v_cvt_scale_pk16_f32_bf6", VOP_V16F32_V3I32_I32, int_amdgcn_cvt_scale_pk16_f32_bf6>; } // End Constraints = "@earlyclobber $vdst" defm V_CVT_SCALE_PK8_F16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp4", VOP_V8F16_I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp4>; defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp4", VOP_V8BF16_I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp4>; defm V_CVT_SCALE_PK8_F32_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp4", VOP_V8F32_I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp4>; } // End ReadsModeReg = 0 let Constraints = "@earlyclobber $vdst" in { let WaveSizePredicate = isWave32 in { defm V_CVT_SCALEF32_PK8_FP8_BF16 : VOP3Inst<"v_cvt_scalef32_pk8_fp8_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp8_bf16>; defm V_CVT_SCALEF32_PK8_BF8_BF16 : VOP3Inst<"v_cvt_scalef32_pk8_bf8_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_bf8_bf16>; defm V_CVT_SCALEF32_PK8_FP8_F16 : VOP3Inst<"v_cvt_scalef32_pk8_fp8_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp8_f16>; defm V_CVT_SCALEF32_PK8_BF8_F16 : VOP3Inst<"v_cvt_scalef32_pk8_bf8_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_bf8_f16>; defm V_CVT_SCALEF32_PK8_FP8_F32 : VOP3Inst<"v_cvt_scalef32_pk8_fp8_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp8_f32>; defm V_CVT_SCALEF32_PK8_BF8_F32 : VOP3Inst<"v_cvt_scalef32_pk8_bf8_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_bf8_f32>; defm V_CVT_SCALEF32_PK8_FP4_F32 : VOP3Inst<"v_cvt_scalef32_pk8_fp4_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp4_f32>; defm V_CVT_SCALEF32_PK8_FP4_F16 : VOP3Inst<"v_cvt_scalef32_pk8_fp4_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp4_f16>; defm V_CVT_SCALEF32_PK8_FP4_BF16 : VOP3Inst<"v_cvt_scalef32_pk8_fp4_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk8_fp4_bf16>; } // End WaveSizePredicate = isWave32 defm V_CVT_SCALEF32_PK16_FP6_F32 : VOP3Inst<"v_cvt_scalef32_pk16_fp6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_fp6_f32>; defm V_CVT_SCALEF32_PK16_BF6_F32 : VOP3Inst<"v_cvt_scalef32_pk16_bf6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_bf6_f32>; defm V_CVT_SCALEF32_PK16_FP6_F16 : VOP3Inst<"v_cvt_scalef32_pk16_fp6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_fp6_f16>; defm V_CVT_SCALEF32_PK16_BF6_F16 : VOP3Inst<"v_cvt_scalef32_pk16_bf6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_bf6_f16>; defm V_CVT_SCALEF32_PK16_FP6_BF16 : VOP3Inst<"v_cvt_scalef32_pk16_fp6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_fp6_bf16>; defm V_CVT_SCALEF32_PK16_BF6_BF16 : VOP3Inst<"v_cvt_scalef32_pk16_bf6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_pk16_bf6_bf16>; let WaveSizePredicate = isWave32 in { defm V_CVT_SCALEF32_SR_PK8_FP8_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk8_fp8_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp8_bf16>; defm V_CVT_SCALEF32_SR_PK8_BF8_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk8_bf8_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_bf8_bf16>; defm V_CVT_SCALEF32_SR_PK8_FP8_F16 : 
VOP3Inst<"v_cvt_scalef32_sr_pk8_fp8_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp8_f16>; defm V_CVT_SCALEF32_SR_PK8_BF8_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk8_bf8_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_bf8_f16>; defm V_CVT_SCALEF32_SR_PK8_FP8_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk8_fp8_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp8_f32>; defm V_CVT_SCALEF32_SR_PK8_BF8_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk8_bf8_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_bf8_f32>; defm V_CVT_SCALEF32_SR_PK8_FP4_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk8_fp4_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp4_f32>; defm V_CVT_SCALEF32_SR_PK8_FP4_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk8_fp4_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp4_f16>; defm V_CVT_SCALEF32_SR_PK8_FP4_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk8_fp4_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk8_fp4_bf16>; } // End WaveSizePredicate = isWave32 defm V_CVT_SCALEF32_SR_PK16_BF6_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk16_bf6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_bf6_bf16>; defm V_CVT_SCALEF32_SR_PK16_BF6_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk16_bf6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_bf6_f16>; defm V_CVT_SCALEF32_SR_PK16_BF6_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk16_bf6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_bf6_f32>; defm V_CVT_SCALEF32_SR_PK16_FP6_BF16 : VOP3Inst<"v_cvt_scalef32_sr_pk16_fp6_bf16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_fp6_bf16>; defm V_CVT_SCALEF32_SR_PK16_FP6_F16 : VOP3Inst<"v_cvt_scalef32_sr_pk16_fp6_f16", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_fp6_f16>; defm V_CVT_SCALEF32_SR_PK16_FP6_F32 : VOP3Inst<"v_cvt_scalef32_sr_pk16_fp6_f32", VOP3_CVT_SCALEF32_PK_F864_Profile, int_amdgcn_cvt_scalef32_sr_pk16_fp6_f32>; } // End Constraints = "@earlyclobber $vdst" let True16Predicate = UseRealTrue16Insts in { def : Cvt_SR_F8_ByteSel_Pat; def : Cvt_SR_F8_ByteSel_Pat; } let True16Predicate = UseFakeTrue16Insts in { def : Cvt_SR_F8_ByteSel_Pat; def : Cvt_SR_F8_ByteSel_Pat; } } // End SubtargetPredicate = isGFX1250Plus let SubtargetPredicate = HasTensorCvtLutInsts in { defm V_PERM_PK16_B4_U4 : VOP3Inst<"v_perm_pk16_b4_u4", VOP3_V2I32_I32_I32_V2I32, int_amdgcn_perm_pk16_b4_u4>; defm V_PERM_PK16_B6_U4 : VOP3Inst<"v_perm_pk16_b6_u4", VOP3_V3I32_I32_I64_V2I32, int_amdgcn_perm_pk16_b6_u4>; defm V_PERM_PK16_B8_U4 : VOP3Inst<"v_perm_pk16_b8_u4", VOP3_V4I32_I64_I64_V2I32, int_amdgcn_perm_pk16_b8_u4>; } // End SubtargetPredicate = HasTensorCvtLutInsts class Cvt_Scale_Sr_F32ToBF16F16_Pat : GCNPat< (DstTy (node DstTy:$vdst_in, f32:$src0, i32:$src1, timm:$word_sel)), (inst (DstSelToOpSelXForm $word_sel), $src0, 0, $src1, VGPR_32:$vdst_in) >; let SubtargetPredicate = HasF32ToF16BF16ConversionSRInsts in { let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in { defm V_CVT_SR_F16_F32 : VOP3Inst<"v_cvt_sr_f16_f32", VOP3_CVT_SR_FP16_TiedInput_Profile>; defm V_CVT_SR_BF16_F32 : VOP3Inst<"v_cvt_sr_bf16_f32", VOP3_CVT_SR_FP16_TiedInput_Profile>; } def : Cvt_Scale_Sr_F32ToBF16F16_Pat; def : Cvt_Scale_Sr_F32ToBF16F16_Pat; } let SubtargetPredicate = HasIEEEMinimumMaximumInsts, ReadsModeReg = 0 in { defm V_MAXIMUMMINIMUM_F32 : VOP3Inst<"v_maximumminimum_f32", VOP3_Profile>; defm V_MINIMUMMAXIMUM_F32 : 
VOP3Inst<"v_minimummaximum_f32", VOP3_Profile>; defm V_MAXIMUMMINIMUM_F16 : VOP3Inst_t16<"v_maximumminimum_f16", VOP_F16_F16_F16_F16>; defm V_MINIMUMMAXIMUM_F16 : VOP3Inst_t16<"v_minimummaximum_f16", VOP_F16_F16_F16_F16>; } // End SubtargetPredicate = HasIEEEMinimumMaximumInsts, ReadsModeReg = 0 let SubtargetPredicate = HasDot9Insts, IsDOT=1 in { defm V_DOT2_F16_F16 : VOP3Inst_t16_with_profiles<"v_dot2_f16_f16", VOP3_DOT_Profile, VOP3_DOT_Profile_t16, VOP3_DOT_Profile_fake16, int_amdgcn_fdot2_f16_f16>; defm V_DOT2_BF16_BF16 : VOP3Inst_t16_with_profiles<"v_dot2_bf16_bf16", VOP3_DOT_Profile, VOP3_DOT_Profile_t16, VOP3_DOT_Profile_fake16, int_amdgcn_fdot2_bf16_bf16>; } class VOP_Pseudo_Scalar : VOPProfile<[dstVt, srcVt, untyped, untyped]> { let DstRC = VOPDstOperand; let Src0RC64 = SrcOp; let HasOMod = 1; let HasModifiers = 1; } def VOP_Pseudo_Scalar_F32 : VOP_Pseudo_Scalar; def VOP_Pseudo_Scalar_F16 : VOP_Pseudo_Scalar; let SubtargetPredicate = HasPseudoScalarTrans, TRANS = 1, isReMaterializable = 1, SchedRW = [WritePseudoScalarTrans] in { defm V_S_EXP_F32 : VOP3PseudoScalarInst<"v_s_exp_f32", VOP_Pseudo_Scalar_F32, AMDGPUexp>; defm V_S_EXP_F16 : VOP3PseudoScalarInst<"v_s_exp_f16", VOP_Pseudo_Scalar_F16>; defm V_S_LOG_F32 : VOP3PseudoScalarInst<"v_s_log_f32", VOP_Pseudo_Scalar_F32, AMDGPUlog>; defm V_S_LOG_F16 : VOP3PseudoScalarInst<"v_s_log_f16", VOP_Pseudo_Scalar_F16>; defm V_S_RCP_F32 : VOP3PseudoScalarInst<"v_s_rcp_f32", VOP_Pseudo_Scalar_F32, AMDGPUrcp>; defm V_S_RCP_F16 : VOP3PseudoScalarInst<"v_s_rcp_f16", VOP_Pseudo_Scalar_F16>; defm V_S_RSQ_F32 : VOP3PseudoScalarInst<"v_s_rsq_f32", VOP_Pseudo_Scalar_F32, AMDGPUrsq>; defm V_S_RSQ_F16 : VOP3PseudoScalarInst<"v_s_rsq_f16", VOP_Pseudo_Scalar_F16>; defm V_S_SQRT_F32 : VOP3PseudoScalarInst<"v_s_sqrt_f32", VOP_Pseudo_Scalar_F32, any_amdgcn_sqrt>; defm V_S_SQRT_F16 : VOP3PseudoScalarInst<"v_s_sqrt_f16", VOP_Pseudo_Scalar_F16>; } class PseudoScalarPatF16 : GCNPat < (f16 (UniformUnaryFrag (f16 (VOP3Mods0 f16:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)))), (f16 (COPY_TO_REGCLASS (f32 (inst i32:$src0_modifiers, f16:$src0, i1:$clamp, i32:$omod)), SReg_32_XEXEC)) >; let SubtargetPredicate = HasPseudoScalarTrans in { def : PseudoScalarPatF16; def : PseudoScalarPatF16; def : PseudoScalarPatF16; def : PseudoScalarPatF16; def : PseudoScalarPatF16; } let HasModifiers = 1 in def ASHR_PK_I8_Profile : VOP3_Profile; let SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1 in { defm V_ASHR_PK_I8_I32 : VOP3Inst<"v_ashr_pk_i8_i32", ASHR_PK_I8_Profile, int_amdgcn_ashr_pk_i8_i32>; defm V_ASHR_PK_U8_I32 : VOP3Inst<"v_ashr_pk_u8_i32", ASHR_PK_I8_Profile, int_amdgcn_ashr_pk_u8_i32>; } // End SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1 class AshrPkI8Pat: GCNPat< (i32 (or (i32 (shl (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))), (i32 8))), (i32 (and (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))), (i32 255))))), (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 ) >; class AshrPkU8Pat: GCNPat< (i32 (or (i32 (shl (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))), (i32 8))), (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))))), (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 ) >; let SubtargetPredicate = HasAshrPkInsts in { def : AshrPkI8Pat; def : AshrPkU8Pat; } //===----------------------------------------------------------------------===// // Integer Clamp Patterns 
//===----------------------------------------------------------------------===// class getClampPat { dag ret3 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2)); dag ret2 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1)); dag ret1 = (P.DstVT (node P.Src0VT:$src0)); dag ret = !if(!eq(P.NumSrcArgs, 3), ret3, !if(!eq(P.NumSrcArgs, 2), ret2, ret1)); } class getClampRes { dag ret3 = (inst P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, (i1 0)); dag ret2 = (inst P.Src0VT:$src0, P.Src1VT:$src1, (i1 0)); dag ret1 = (inst P.Src0VT:$src0, (i1 0)); dag ret = !if(!eq(P.NumSrcArgs, 3), ret3, !if(!eq(P.NumSrcArgs, 2), ret2, ret1)); } class IntClampPat : GCNPat< getClampPat.ret, getClampRes.ret >; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; def : IntClampPat; //===----------------------------------------------------------------------===// // Floating-point operation Patterns //===----------------------------------------------------------------------===// // Implement fminimum(x, y) by using minimum3(x, y, y) class MinimumMaximumByMinimum3Maximum3 : GCNPat< (vt (node (VOP3Mods vt:$src0, i32:$src0_mods), (VOP3Mods vt:$src1, i32:$src1_mods))), (inst $src0_mods, $src0, $src1_mods, $src1, $src1_mods, $src1) >; // Prefer the real 2 operand form if legal let SubtargetPredicate = HasMinimum3Maximum3F32 in { def : MinimumMaximumByMinimum3Maximum3; def : MinimumMaximumByMinimum3Maximum3; } //===----------------------------------------------------------------------===// // Target-specific instruction encodings. //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // GFX12. 
//===----------------------------------------------------------------------===// defm V_MIN3_NUM_F32 : VOP3_Realtriple_with_name_gfx12<0x229, "V_MIN3_F32", "v_min3_num_f32">; defm V_MAX3_NUM_F32 : VOP3_Realtriple_with_name_gfx12<0x22a, "V_MAX3_F32", "v_max3_num_f32">; defm V_MIN3_NUM_F16 : VOP3_Realtriple_t16_and_fake16_gfx12<0x22b, "v_min3_num_f16", "V_MIN3_F16", "v_min3_f16">; defm V_MAX3_NUM_F16 : VOP3_Realtriple_t16_and_fake16_gfx12<0x22c, "v_max3_num_f16", "V_MAX3_F16", "v_max3_f16">; defm V_MINIMUM3_F32 : VOP3Only_Realtriple_gfx12<0x22d>; defm V_MAXIMUM3_F32 : VOP3Only_Realtriple_gfx12<0x22e>; defm V_MINIMUM3_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x22f, "v_minimum3_f16">; defm V_MAXIMUM3_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x230, "v_maximum3_f16">; defm V_MED3_NUM_F32 : VOP3_Realtriple_with_name_gfx12<0x231, "V_MED3_F32", "v_med3_num_f32">; defm V_MED3_NUM_F16 : VOP3_Realtriple_t16_and_fake16_gfx12<0x232, "v_med3_num_f16", "V_MED3_F16", "v_med3_f16">; defm V_MINMAX_NUM_F32 : VOP3_Realtriple_with_name_gfx12<0x268, "V_MINMAX_F32", "v_minmax_num_f32">; defm V_MAXMIN_NUM_F32 : VOP3_Realtriple_with_name_gfx12<0x269, "V_MAXMIN_F32", "v_maxmin_num_f32">; defm V_MINMAX_NUM_F16 : VOP3_Realtriple_t16_and_fake16_gfx12<0x26a, "v_minmax_num_f16", "V_MINMAX_F16", "v_minmax_f16">; defm V_MAXMIN_NUM_F16 : VOP3_Realtriple_t16_and_fake16_gfx12<0x26b, "v_maxmin_num_f16", "V_MAXMIN_F16", "v_maxmin_f16">; defm V_MINIMUMMAXIMUM_F32 : VOP3Only_Realtriple_gfx12<0x26c>; defm V_MAXIMUMMINIMUM_F32 : VOP3Only_Realtriple_gfx12<0x26d>; defm V_MINIMUMMAXIMUM_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x26e, "v_minimummaximum_f16">; defm V_MAXIMUMMINIMUM_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x26f, "v_maximumminimum_f16">; defm V_S_EXP_F32 : VOP3Only_Real_Base_gfx12<0x280>; defm V_S_EXP_F16 : VOP3Only_Real_Base_gfx12<0x281>; defm V_S_LOG_F32 : VOP3Only_Real_Base_gfx12<0x282>; defm V_S_LOG_F16 : VOP3Only_Real_Base_gfx12<0x283>; defm V_S_RCP_F32 : VOP3Only_Real_Base_gfx12<0x284>; defm V_S_RCP_F16 : VOP3Only_Real_Base_gfx12<0x285>; defm V_S_RSQ_F32 : VOP3Only_Real_Base_gfx12<0x286>; defm V_S_RSQ_F16 : VOP3Only_Real_Base_gfx12<0x287>; defm V_S_SQRT_F32 : VOP3Only_Real_Base_gfx12<0x288>; defm V_S_SQRT_F16 : VOP3Only_Real_Base_gfx12<0x289>; defm V_MAD_CO_U64_U32 : VOP3be_Real_with_name_gfx12<0x2fe, "V_MAD_U64_U32", "v_mad_co_u64_u32">; defm V_MAD_CO_I64_I32 : VOP3be_Real_with_name_gfx12<0x2ff, "V_MAD_I64_I32", "v_mad_co_i64_i32">; defm V_MINIMUM_F64 : VOP3Only_Real_Base_gfx12<0x341>; defm V_MAXIMUM_F64 : VOP3Only_Real_Base_gfx12<0x342>; defm V_MINIMUM_F32 : VOP3Only_Realtriple_gfx12<0x365>; defm V_MAXIMUM_F32 : VOP3Only_Realtriple_gfx12<0x366>; defm V_MINIMUM_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x367, "v_minimum_f16">; defm V_MAXIMUM_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x368, "v_maximum_f16">; defm V_PERMLANE16_VAR_B32 : VOP3Only_Real_Base_gfx12<0x30f>; defm V_PERMLANEX16_VAR_B32 : VOP3Only_Real_Base_gfx12<0x310>; defm V_BITOP3_B16_gfx1250 : VOP3_Real_BITOP3_t16_and_fake16_gfx1250<0x233, "v_bitop3_b16">; defm V_BITOP3_B32 : VOP3_Real_BITOP3_gfx1250<0x234>; defm V_MAD_U32 : VOP3Only_Realtriple_gfx1250<0x235>; defm V_MAD_NC_U64_U32 : VOP3Only_Realtriple_gfx1250<0x2fa>; defm V_MAD_NC_I64_I32 : VOP3Only_Realtriple_gfx1250<0x2fb>; defm V_MIN_U64 : VOP3Only_Realtriple_gfx1250<0x318>; defm V_MAX_U64 : VOP3Only_Realtriple_gfx1250<0x319>; defm V_MIN_I64 : VOP3Only_Realtriple_gfx1250<0x31a>; defm V_MAX_I64 : VOP3Only_Realtriple_gfx1250<0x31b>; defm V_ADD_MAX_I32 : 
VOP3Only_Realtriple_gfx1250<0x25e>; defm V_ADD_MAX_U32 : VOP3Only_Realtriple_gfx1250<0x25f>; defm V_ADD_MIN_I32 : VOP3Only_Realtriple_gfx1250<0x260>; defm V_ADD_MIN_U32 : VOP3Only_Realtriple_gfx1250<0x261>; defm V_PERMLANE_BCAST_B32 : VOP3Only_Real_Base_gfx12<0x270>; defm V_PERMLANE_UP_B32 : VOP3Only_Real_Base_gfx12<0x271>; defm V_PERMLANE_DOWN_B32 : VOP3Only_Real_Base_gfx12<0x272>; defm V_PERMLANE_XOR_B32 : VOP3Only_Real_Base_gfx12<0x273>; defm V_PERMLANE_IDX_GEN_B32 : VOP3Only_Real_Base_gfx12<0x314>; //===----------------------------------------------------------------------===// // GFX11, GFX12 //===----------------------------------------------------------------------===// multiclass VOP3_Real_with_name_gfx11_gfx12 op, string opName, string asmName> : VOP3_Real_with_name, VOP3_Real_with_name; multiclass VOP3_Realtriple_gfx11_gfx12 op> : VOP3_Realtriple, VOP3_Realtriple; multiclass VOP3_Real_Base_gfx11_gfx12 op> : VOP3_Real_Base, VOP3_Real_Base; multiclass VOP3_Real_Base_gfx11_gfx12_not_gfx1250 op> : VOP3_Real_Base, VOP3_Real_Base; multiclass VOP3_Realtriple_with_name_gfx11_gfx12 op, string opName, string asmName> : VOP3_Realtriple_with_name, VOP3_Realtriple_with_name; multiclass VOP3Dot_Realtriple_t16_and_fake16_gfx11_gfx12 op, string asmName, string opName = NAME> { defm _t16: VOP3Dot_Realtriple_gfx11_gfx12; defm _fake16: VOP3Dot_Realtriple_gfx11_gfx12; } multiclass VOP3_Realtriple_t16_gfx11_gfx12 op, string asmName, string opName = NAME, string pseudo_mnemonic = "", bit isSingle = 0> : VOP3_Realtriple_with_name, VOP3_Realtriple_with_name; multiclass VOP3_Realtriple_t16_and_fake16_gfx11_gfx12 op, string asmName, string opName = NAME, string pseudo_mnemonic = "", bit isSingle = 0> { defm opName#"_t16": VOP3_Realtriple_t16_gfx11_gfx12; defm opName#"_fake16": VOP3_Realtriple_t16_gfx11_gfx12; } multiclass VOP3be_Real_gfx11_gfx12 op, string opName, string asmName> : VOP3be_Real, VOP3be_Real; multiclass VOP3_Real_No_Suffix_gfx11_gfx12 op> : VOP3_Real_No_Suffix, VOP3_Real_No_Suffix; defm V_FMA_DX9_ZERO_F32 : VOP3_Real_with_name_gfx11_gfx12<0x209, "V_FMA_LEGACY_F32", "v_fma_dx9_zero_f32">; defm V_MAD_I32_I24 : VOP3_Realtriple_gfx11_gfx12<0x20a>; defm V_MAD_U32_U24 : VOP3_Realtriple_gfx11_gfx12<0x20b>; defm V_CUBEID_F32 : VOP3_Realtriple_gfx11_gfx12<0x20c>; defm V_CUBESC_F32 : VOP3_Realtriple_gfx11_gfx12<0x20d>; defm V_CUBETC_F32 : VOP3_Realtriple_gfx11_gfx12<0x20e>; defm V_CUBEMA_F32 : VOP3_Realtriple_gfx11_gfx12<0x20f>; defm V_BFE_U32 : VOP3_Realtriple_gfx11_gfx12<0x210>; defm V_BFE_I32 : VOP3_Realtriple_gfx11_gfx12<0x211>; defm V_BFI_B32 : VOP3_Realtriple_gfx11_gfx12<0x212>; defm V_FMA_F32 : VOP3_Realtriple_gfx11_gfx12<0x213>; defm V_FMA_F64 : VOP3_Real_Base_gfx11_gfx12<0x214>; defm V_LERP_U8 : VOP3_Realtriple_gfx11_gfx12<0x215>; defm V_ALIGNBIT_B32 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x216, "v_alignbit_b32">; defm V_ALIGNBYTE_B32 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x217, "v_alignbyte_b32">; defm V_MULLIT_F32 : VOP3_Realtriple_gfx11_gfx12<0x218>; defm V_MIN3_F32 : VOP3_Realtriple_gfx11<0x219>; defm V_MIN3_I32 : VOP3_Realtriple_gfx11_gfx12<0x21a>; defm V_MIN3_U32 : VOP3_Realtriple_gfx11_gfx12<0x21b>; defm V_MAX3_F32 : VOP3_Realtriple_gfx11<0x21c>; defm V_MAX3_I32 : VOP3_Realtriple_gfx11_gfx12<0x21d>; defm V_MAX3_U32 : VOP3_Realtriple_gfx11_gfx12<0x21e>; defm V_MED3_F32 : VOP3_Realtriple_gfx11<0x21f>; defm V_MED3_I32 : VOP3_Realtriple_gfx11_gfx12<0x220>; defm V_MED3_U32 : VOP3_Realtriple_gfx11_gfx12<0x221>; defm V_SAD_U8 : VOP3_Realtriple_gfx11_gfx12<0x222>; defm 
V_SAD_HI_U8 : VOP3_Realtriple_gfx11_gfx12<0x223>; defm V_SAD_U16 : VOP3_Realtriple_gfx11_gfx12<0x224>; defm V_SAD_U32 : VOP3_Realtriple_gfx11_gfx12<0x225>; defm V_CVT_PK_U8_F32 : VOP3_Realtriple_gfx11_gfx12<0x226>; defm V_DIV_FIXUP_F32 : VOP3_Real_Base_gfx11_gfx12<0x227>; defm V_DIV_FIXUP_F64 : VOP3_Real_Base_gfx11_gfx12<0x228>; defm V_DIV_FMAS_F32 : VOP3_Real_Base_gfx11_gfx12<0x237>; defm V_DIV_FMAS_F64 : VOP3_Real_Base_gfx11_gfx12<0x238>; defm V_MSAD_U8 : VOP3_Realtriple_gfx11_gfx12<0x239>; defm V_QSAD_PK_U16_U8 : VOP3_Real_Base_gfx11_gfx12<0x23a>; defm V_MQSAD_PK_U16_U8 : VOP3_Real_Base_gfx11_gfx12<0x23b>; defm V_MQSAD_U32_U8 : VOP3_Real_Base_gfx11_gfx12<0x23d>; defm V_XOR3_B32 : VOP3_Realtriple_gfx11_gfx12<0x240>; defm V_MAD_U16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x241, "v_mad_u16", "V_MAD_U16_gfx9">; defm V_PERM_B32 : VOP3_Realtriple_gfx11_gfx12<0x244>; defm V_XAD_U32 : VOP3_Realtriple_gfx11_gfx12<0x245>; defm V_LSHL_ADD_U32 : VOP3_Realtriple_gfx11_gfx12<0x246>; defm V_ADD_LSHL_U32 : VOP3_Realtriple_gfx11_gfx12<0x247>; defm V_FMA_F16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x248, "v_fma_f16", "V_FMA_F16_gfx9">; defm V_MIN3_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx11<0x249, "v_min3_f16">; defm V_MIN3_I16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x24a, "v_min3_i16">; defm V_MIN3_U16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x24b, "v_min3_u16">; defm V_MAX3_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx11<0x24c, "v_max3_f16">; defm V_MAX3_I16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x24d, "v_max3_i16">; defm V_MAX3_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x24e, "v_max3_u16">; defm V_MED3_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx11<0x24f, "v_med3_f16">; defm V_MED3_I16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x250, "v_med3_i16">; defm V_MED3_U16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x251, "v_med3_u16">; defm V_MAD_I16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x253, "v_mad_i16", "V_MAD_I16_gfx9">; defm V_DIV_FIXUP_F16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x254, "v_div_fixup_f16", "V_DIV_FIXUP_F16_gfx9">; defm V_ADD3_U32 : VOP3_Realtriple_gfx11_gfx12<0x255>; defm V_LSHL_OR_B32 : VOP3_Realtriple_gfx11_gfx12<0x256>; defm V_AND_OR_B32 : VOP3_Realtriple_gfx11_gfx12<0x257>; defm V_OR3_B32 : VOP3_Realtriple_gfx11_gfx12<0x258>; defm V_MAD_U32_U16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x259, "v_mad_u32_u16">; defm V_MAD_I32_I16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x25a, "v_mad_i32_i16">; defm V_PERMLANE16_B32 : VOP3_Real_Base_gfx11_gfx12<0x25b>; defm V_PERMLANEX16_B32 : VOP3_Real_Base_gfx11_gfx12<0x25c>; defm V_MAXMIN_F32 : VOP3_Realtriple_gfx11<0x25e>; defm V_MINMAX_F32 : VOP3_Realtriple_gfx11<0x25f>; defm V_MAXMIN_F16 : VOP3_Realtriple_t16_and_fake16_gfx11<0x260, "v_maxmin_f16">; defm V_MINMAX_F16 : VOP3_Realtriple_t16_and_fake16_gfx11<0x261, "v_minmax_f16">; defm V_MAXMIN_U32 : VOP3_Realtriple_gfx11_gfx12<0x262>; defm V_MINMAX_U32 : VOP3_Realtriple_gfx11_gfx12<0x263>; defm V_MAXMIN_I32 : VOP3_Realtriple_gfx11_gfx12<0x264>; defm V_MINMAX_I32 : VOP3_Realtriple_gfx11_gfx12<0x265>; defm V_DOT2_F16_F16 : VOP3Dot_Realtriple_t16_and_fake16_gfx11_gfx12<0x266, "v_dot2_f16_f16">; defm V_DOT2_BF16_BF16 : VOP3Dot_Realtriple_t16_and_fake16_gfx11_gfx12<0x267, "v_dot2_bf16_bf16">; defm V_DIV_SCALE_F32 : VOP3be_Real_gfx11_gfx12<0x2fc, "V_DIV_SCALE_F32", "v_div_scale_f32">; defm V_DIV_SCALE_F64 : VOP3be_Real_gfx11_gfx12<0x2fd, "V_DIV_SCALE_F64", "v_div_scale_f64">; defm V_MAD_U64_U32_gfx11 : VOP3be_Real_gfx11<0x2fe, 
"V_MAD_U64_U32_gfx11", "v_mad_u64_u32">; defm V_MAD_I64_I32_gfx11 : VOP3be_Real_gfx11<0x2ff, "V_MAD_I64_I32_gfx11", "v_mad_i64_i32">; defm V_ADD_NC_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x303, "v_add_nc_u16">; defm V_SUB_NC_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x304, "v_sub_nc_u16">; defm V_MUL_LO_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x305, "v_mul_lo_u16">; defm V_CVT_PK_I16_F32 : VOP3_Realtriple_gfx11_gfx12<0x306>; defm V_CVT_PK_U16_F32 : VOP3_Realtriple_gfx11_gfx12<0x307>; defm V_MAX_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x309, "v_max_u16">; defm V_MAX_I16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x30a, "v_max_i16">; defm V_MIN_U16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x30b, "v_min_u16">; defm V_MIN_I16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x30c, "v_min_i16">; defm V_ADD_NC_I16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x30d, "v_add_nc_i16", "V_ADD_I16">; defm V_SUB_NC_I16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x30e, "v_sub_nc_i16", "V_SUB_I16">; defm V_PACK_B32_F16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x311, "v_pack_b32_f16">; defm V_CVT_PK_NORM_I16_F16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x312, "v_cvt_pk_norm_i16_f16", "V_CVT_PKNORM_I16_F16", "v_cvt_pknorm_i16_f16">; defm V_CVT_PK_NORM_U16_F16 : VOP3_Realtriple_t16_and_fake16_gfx11_gfx12<0x313, "v_cvt_pk_norm_u16_f16", "V_CVT_PKNORM_U16_F16", "v_cvt_pknorm_u16_f16">; defm V_SUB_NC_I32 : VOP3_Realtriple_with_name_gfx11_gfx12<0x325, "V_SUB_I32", "v_sub_nc_i32">; defm V_ADD_NC_I32 : VOP3_Realtriple_with_name_gfx11_gfx12<0x326, "V_ADD_I32", "v_add_nc_i32">; defm V_ADD_F64 : VOP3_Real_Base_gfx11<0x327>; defm V_MUL_F64 : VOP3_Real_Base_gfx11<0x328>; defm V_MIN_F64 : VOP3_Real_Base_gfx11<0x329>; defm V_MAX_F64 : VOP3_Real_Base_gfx11<0x32a>; defm V_LDEXP_F64 : VOP3_Real_Base_gfx11_gfx12<0x32b>; defm V_MUL_LO_U32 : VOP3_Real_Base_gfx11_gfx12_not_gfx1250<0x32c>; defm V_MUL_HI_U32 : VOP3_Real_Base_gfx11_gfx12_not_gfx1250<0x32d>; defm V_MUL_HI_I32 : VOP3_Real_Base_gfx11_gfx12_not_gfx1250<0x32e>; defm V_TRIG_PREOP_F64 : VOP3_Real_Base_gfx11_gfx12<0x32f>; defm V_LSHLREV_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x338, "v_lshlrev_b16">; defm V_LSHRREV_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x339, "v_lshrrev_b16">; defm V_ASHRREV_I16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x33a, "v_ashrrev_i16">; defm V_LSHLREV_B64 : VOP3_Real_Base_gfx11<0x33c>; defm V_LSHRREV_B64 : VOP3_Real_Base_gfx11_gfx12<0x33d>; defm V_ASHRREV_I64 : VOP3_Real_Base_gfx11_gfx12<0x33e>; defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx11_gfx12<0x360>; // Pseudo in VOP2 let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in { defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx11_gfx12<0x361>; // Pseudo in VOP2 } // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) defm V_AND_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x362, "v_and_b16">; defm V_OR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x363, "v_or_b16">; defm V_XOR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x364, "v_xor_b16">; defm V_CVT_PK_FP8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12_not_gfx1250<0x369, "v_cvt_pk_fp8_f32">; defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x369, "v_cvt_pk_fp8_f32">; defm V_CVT_PK_BF8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x36a, "v_cvt_pk_bf8_f32">; defm V_CVT_SR_FP8_F32_gfx12 : 
VOP3_Realtriple_with_name_gfx11_gfx12_not_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx12", "v_cvt_sr_fp8_f32">; defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Only_Realtriple_with_name_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx1250", "v_cvt_sr_fp8_f32">; defm V_CVT_SR_BF8_F32_gfx12 : VOP3_Realtriple_with_name_gfx11_gfx12<0x36c, "V_CVT_SR_BF8_F32_gfx12", "v_cvt_sr_bf8_f32">; let AssemblerPredicate = isGFX11Plus in { def : AMDGPUMnemonicAlias<"v_add3_nc_u32", "v_add3_u32">; def : AMDGPUMnemonicAlias<"v_xor_add_u32", "v_xad_u32">; } // These instructions differ from GFX12 variant by supporting DPP: defm V_MUL_LO_U32 : VOP3Only_Realtriple_gfx1250<0x32c>; defm V_MUL_HI_U32 : VOP3Only_Realtriple_gfx1250<0x32d>; defm V_MUL_HI_I32 : VOP3Only_Realtriple_gfx1250<0x32e>; defm V_PERM_PK16_B4_U4 : VOP3Only_Real_Base_gfx1250<0x23f>; defm V_PERM_PK16_B6_U4 : VOP3Only_Real_Base_gfx1250<0x242>; defm V_PERM_PK16_B8_U4 : VOP3Only_Real_Base_gfx1250<0x243>; defm V_LSHL_ADD_U64 : VOP3Only_Realtriple_gfx1250<0x252>; defm V_ASHR_PK_I8_I32 : VOP3Only_Realtriple_gfx1250<0x290>; defm V_ASHR_PK_U8_I32 : VOP3Only_Realtriple_gfx1250<0x291>; defm V_CVT_SCALE_PK8_F16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x29f>; defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a0>; defm V_CVT_SCALE_PK8_F32_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a1>; defm V_CVT_SCALE_PK8_F16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a8>; defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a9>; defm V_CVT_SCALE_PK8_F32_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2aa>; defm V_CVT_SCALE_PK8_F16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ab>; defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ac>; defm V_CVT_SCALE_PK8_F32_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ad>; defm V_CVT_SCALEF32_PK8_FP4_F32 : VOP3Only_Real_Base_gfx1250<0x2b0>; defm V_CVT_SCALEF32_PK8_FP4_F16 : VOP3Only_Real_Base_gfx1250<0x2b3>; defm V_CVT_SCALEF32_PK8_FP8_BF16 : VOP3Only_Real_Base_gfx1250<0x2b4>; defm V_CVT_SCALEF32_PK8_BF8_BF16 : VOP3Only_Real_Base_gfx1250<0x2b5>; defm V_CVT_SCALEF32_PK8_FP4_BF16 : VOP3Only_Real_Base_gfx1250<0x2b8>; defm V_CVT_SCALEF32_PK8_FP8_F32 : VOP3Only_Real_Base_gfx1250<0x2c3>; defm V_CVT_SCALEF32_PK8_FP8_F16 : VOP3Only_Real_Base_gfx1250<0x2c4>; defm V_CVT_SCALEF32_PK8_BF8_F32 : VOP3Only_Real_Base_gfx1250<0x2c5>; defm V_CVT_SCALEF32_PK8_BF8_F16 : VOP3Only_Real_Base_gfx1250<0x2c6>; defm V_CVT_SCALE_PK16_F16_FP6 : VOP3Only_ScaleSel_Real_gfx1250<0x2c7>; defm V_CVT_SCALE_PK16_BF16_FP6 : VOP3Only_ScaleSel_Real_gfx1250<0x2c8>; defm V_CVT_SCALE_PK16_F32_FP6 : VOP3Only_ScaleSel_Real_gfx1250<0x2c9>; defm V_CVT_SCALE_PK16_F16_BF6 : VOP3Only_ScaleSel_Real_gfx1250<0x2ca>; defm V_CVT_SCALE_PK16_BF16_BF6 : VOP3Only_ScaleSel_Real_gfx1250<0x2cb>; defm V_CVT_SCALE_PK16_F32_BF6 : VOP3Only_ScaleSel_Real_gfx1250<0x2cc>; defm V_CVT_SCALEF32_PK16_FP6_F32 : VOP3Only_Real_Base_gfx1250<0x2cd>; defm V_CVT_SCALEF32_PK16_BF6_F32 : VOP3Only_Real_Base_gfx1250<0x2ce>; defm V_CVT_SCALEF32_PK16_FP6_F16 : VOP3Only_Real_Base_gfx1250<0x2cf>; defm V_CVT_SCALEF32_PK16_BF6_F16 : VOP3Only_Real_Base_gfx1250<0x2d0>; defm V_CVT_SCALEF32_PK16_FP6_BF16 : VOP3Only_Real_Base_gfx1250<0x2d1>; defm V_CVT_SCALEF32_PK16_BF6_BF16 : VOP3Only_Real_Base_gfx1250<0x2d2>; defm V_CVT_SCALEF32_SR_PK16_FP6_F32 : VOP3Only_Real_Base_gfx1250<0x2d3>; defm V_CVT_SCALEF32_SR_PK16_BF6_F32 : VOP3Only_Real_Base_gfx1250<0x2d4>; defm V_CVT_SCALEF32_SR_PK16_FP6_F16 : VOP3Only_Real_Base_gfx1250<0x2d5>; defm V_CVT_SCALEF32_SR_PK16_BF6_F16 : VOP3Only_Real_Base_gfx1250<0x2d6>; defm V_CVT_SCALEF32_SR_PK16_FP6_BF16 : 
VOP3Only_Real_Base_gfx1250<0x2d7>; defm V_CVT_SCALEF32_SR_PK16_BF6_BF16 : VOP3Only_Real_Base_gfx1250<0x2d8>; defm V_CVT_SCALEF32_SR_PK8_FP4_F32 : VOP3Only_Real_Base_gfx1250<0x297>; defm V_CVT_SCALEF32_SR_PK8_FP8_F32 : VOP3Only_Real_Base_gfx1250<0x298>; defm V_CVT_SCALEF32_SR_PK8_BF8_F32 : VOP3Only_Real_Base_gfx1250<0x299>; defm V_CVT_SCALEF32_SR_PK8_FP4_F16 : VOP3Only_Real_Base_gfx1250<0x2b9>; defm V_CVT_SCALEF32_SR_PK8_FP4_BF16 : VOP3Only_Real_Base_gfx1250<0x2bc>; defm V_CVT_SCALEF32_SR_PK8_FP8_F16 : VOP3Only_Real_Base_gfx1250<0x2bf>; defm V_CVT_SCALEF32_SR_PK8_FP8_BF16 : VOP3Only_Real_Base_gfx1250<0x2c0>; defm V_CVT_SCALEF32_SR_PK8_BF8_F16 : VOP3Only_Real_Base_gfx1250<0x2c1>; defm V_CVT_SCALEF32_SR_PK8_BF8_BF16 : VOP3Only_Real_Base_gfx1250<0x2c2>; defm V_CVT_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36d>; defm V_CVT_SR_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36e>; defm V_CVT_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x36f>; defm V_CVT_SR_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x370>; defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x372, "v_cvt_pk_fp8_f16">; defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x373, "v_cvt_pk_bf8_f16">; defm V_CVT_SR_FP8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x374>; defm V_CVT_SR_BF8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x375>; //===----------------------------------------------------------------------===// // GFX10. //===----------------------------------------------------------------------===// let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in { multiclass VOP3_Real_gfx10 op> { def _gfx10 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX10>, VOP3e_gfx10(NAME#"_e64").Pfl>; } multiclass VOP3_Real_No_Suffix_gfx10 op> { def _gfx10 : VOP3_Real(NAME), SIEncodingFamily.GFX10>, VOP3e_gfx10(NAME).Pfl>; } multiclass VOP3_Real_gfx10_with_name op, string opName, string asmName> { def _gfx10 : VOP3_Real(opName#"_e64"), SIEncodingFamily.GFX10>, VOP3e_gfx10(opName#"_e64").Pfl> { VOP3_Pseudo ps = !cast(opName#"_e64"); let AsmString = asmName # ps.AsmOperands; let IsSingle = 1; } } multiclass VOP3be_Real_gfx10 op> { def _gfx10 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX10>, VOP3be_gfx10(NAME#"_e64").Pfl>; } multiclass VOP3Interp_Real_gfx10 op> { def _gfx10 : VOP3_Real(NAME), SIEncodingFamily.GFX10>, VOP3Interp_gfx10(NAME).Pfl>; } multiclass VOP3OpSel_Real_gfx10 op> { def _gfx10 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX10>, VOP3OpSel_gfx10(NAME#"_e64").Pfl>; } multiclass VOP3OpSel_Real_gfx10_with_name op, string opName, string asmName> { def _gfx10 : VOP3_Real(opName#"_e64"), SIEncodingFamily.GFX10>, VOP3OpSel_gfx10(opName#"_e64").Pfl> { VOP3_Pseudo ps = !cast(opName#"_e64"); let AsmString = asmName # ps.AsmOperands; } } } // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" defm V_ALIGNBIT_B32_opsel : VOP3OpSel_Real_gfx10_with_name<0x14e, "V_ALIGNBIT_B32_opsel", "v_alignbit_b32">; defm V_ALIGNBYTE_B32_opsel : VOP3OpSel_Real_gfx10_with_name<0x14f, "V_ALIGNBYTE_B32_opsel", "v_alignbyte_b32">; defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>; let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in { defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>; } // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) let SubtargetPredicate = isGFX10Before1030 in { defm V_MUL_LO_I32 : VOP3_Real_gfx10<0x16b>; } defm V_XOR3_B32 : VOP3_Real_gfx10<0x178>; defm V_LSHLREV_B64 : VOP3_Real_gfx10<0x2ff>; defm 
V_LSHRREV_B64 : VOP3_Real_gfx10<0x300>; defm V_ASHRREV_I64 : VOP3_Real_gfx10<0x301>; defm V_PERM_B32 : VOP3_Real_gfx10<0x344>; defm V_XAD_U32 : VOP3_Real_gfx10<0x345>; defm V_LSHL_ADD_U32 : VOP3_Real_gfx10<0x346>; defm V_ADD_LSHL_U32 : VOP3_Real_gfx10<0x347>; defm V_ADD3_U32 : VOP3_Real_gfx10<0x36d>; defm V_LSHL_OR_B32 : VOP3_Real_gfx10<0x36f>; defm V_AND_OR_B32 : VOP3_Real_gfx10<0x371>; defm V_OR3_B32 : VOP3_Real_gfx10<0x372>; // TODO-GFX10: add MC tests for v_add/sub_nc_i16 defm V_ADD_NC_I16 : VOP3OpSel_Real_gfx10_with_name<0x30d, "V_ADD_I16", "v_add_nc_i16">; defm V_SUB_NC_I16 : VOP3OpSel_Real_gfx10_with_name<0x30e, "V_SUB_I16", "v_sub_nc_i16">; defm V_SUB_NC_I32 : VOP3_Real_gfx10_with_name<0x376, "V_SUB_I32", "v_sub_nc_i32">; defm V_ADD_NC_I32 : VOP3_Real_gfx10_with_name<0x37f, "V_ADD_I32", "v_add_nc_i32">; defm V_INTERP_P1_F32_e64 : VOP3Interp_Real_gfx10<0x200>; defm V_INTERP_P2_F32_e64 : VOP3Interp_Real_gfx10<0x201>; defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_gfx10<0x202>; defm V_INTERP_P1LL_F16 : VOP3Interp_Real_gfx10<0x342>; defm V_INTERP_P1LV_F16 : VOP3Interp_Real_gfx10<0x343>; defm V_INTERP_P2_F16 : VOP3Interp_Real_gfx10<0x35a>; defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx10<0x311>; defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx10<0x312>; defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx10<0x313>; defm V_MIN3_F16 : VOP3OpSel_Real_gfx10<0x351>; defm V_MIN3_I16 : VOP3OpSel_Real_gfx10<0x352>; defm V_MIN3_U16 : VOP3OpSel_Real_gfx10<0x353>; defm V_MAX3_F16 : VOP3OpSel_Real_gfx10<0x354>; defm V_MAX3_I16 : VOP3OpSel_Real_gfx10<0x355>; defm V_MAX3_U16 : VOP3OpSel_Real_gfx10<0x356>; defm V_MED3_F16 : VOP3OpSel_Real_gfx10<0x357>; defm V_MED3_I16 : VOP3OpSel_Real_gfx10<0x358>; defm V_MED3_U16 : VOP3OpSel_Real_gfx10<0x359>; defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx10<0x373>; defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx10<0x375>; defm V_MAD_U16 : VOP3OpSel_Real_gfx10_with_name<0x340, "V_MAD_U16_gfx9", "v_mad_u16">; defm V_FMA_F16 : VOP3OpSel_Real_gfx10_with_name<0x34b, "V_FMA_F16_gfx9", "v_fma_f16">; defm V_MAD_I16 : VOP3OpSel_Real_gfx10_with_name<0x35e, "V_MAD_I16_gfx9", "v_mad_i16">; defm V_DIV_FIXUP_F16 : VOP3OpSel_Real_gfx10_with_name<0x35f, "V_DIV_FIXUP_F16_gfx9", "v_div_fixup_f16">; defm V_ADD_NC_U16 : VOP3OpSel_Real_gfx10<0x303>; defm V_SUB_NC_U16 : VOP3OpSel_Real_gfx10<0x304>; defm V_MUL_LO_U16 : VOP3OpSel_Real_gfx10_with_name<0x305, "V_MUL_LO_U16_opsel", "v_mul_lo_u16">; defm V_LSHRREV_B16 : VOP3OpSel_Real_gfx10_with_name<0x307, "V_LSHRREV_B16_opsel", "v_lshrrev_b16">; defm V_ASHRREV_I16 : VOP3OpSel_Real_gfx10_with_name<0x308, "V_ASHRREV_I16_opsel", "v_ashrrev_i16">; defm V_MAX_U16 : VOP3OpSel_Real_gfx10_with_name<0x309, "V_MAX_U16_opsel", "v_max_u16">; defm V_MAX_I16 : VOP3OpSel_Real_gfx10_with_name<0x30a, "V_MAX_I16_opsel", "v_max_i16">; defm V_MIN_U16 : VOP3OpSel_Real_gfx10_with_name<0x30b, "V_MIN_U16_opsel", "v_min_u16">; defm V_MIN_I16 : VOP3OpSel_Real_gfx10_with_name<0x30c, "V_MIN_I16_opsel", "v_min_i16">; defm V_LSHLREV_B16 : VOP3OpSel_Real_gfx10_with_name<0x314, "V_LSHLREV_B16_opsel", "v_lshlrev_b16">; defm V_PERMLANE16_B32 : VOP3OpSel_Real_gfx10<0x377>; defm V_PERMLANEX16_B32 : VOP3OpSel_Real_gfx10<0x378>; //===----------------------------------------------------------------------===// // GFX7, GFX10. 
//===----------------------------------------------------------------------===// let AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" in { multiclass VOP3_Real_gfx7 op> { def _gfx7 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, VOP3e_gfx6_gfx7(NAME#"_e64").Pfl>; } multiclass VOP3be_Real_gfx7 op> { def _gfx7 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, VOP3be_gfx6_gfx7(NAME#"_e64").Pfl>; } } // End AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" multiclass VOP3_Real_gfx7_gfx10 op> : VOP3_Real_gfx7, VOP3_Real_gfx10; multiclass VOP3be_Real_gfx7_gfx10 op> : VOP3be_Real_gfx7, VOP3be_Real_gfx10; defm V_QSAD_PK_U16_U8 : VOP3_Real_gfx7_gfx10<0x172>; defm V_MQSAD_U32_U8 : VOP3_Real_gfx7_gfx10<0x175>; defm V_MAD_U64_U32 : VOP3be_Real_gfx7_gfx10<0x176>; defm V_MAD_I64_I32 : VOP3be_Real_gfx7_gfx10<0x177>; //===----------------------------------------------------------------------===// // GFX6, GFX7, GFX10. //===----------------------------------------------------------------------===// let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in { multiclass VOP3_Real_gfx6_gfx7 op> { def _gfx6_gfx7 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, VOP3e_gfx6_gfx7(NAME#"_e64").Pfl>; } multiclass VOP3be_Real_gfx6_gfx7 op> { def _gfx6_gfx7 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.SI>, VOP3be_gfx6_gfx7(NAME#"_e64").Pfl>; } } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" multiclass VOP3_Real_gfx6_gfx7_gfx10 op> : VOP3_Real_gfx6_gfx7, VOP3_Real_gfx10; multiclass VOP3be_Real_gfx6_gfx7_gfx10 op> : VOP3be_Real_gfx6_gfx7, VOP3be_Real_gfx10; defm V_LSHL_B64 : VOP3_Real_gfx6_gfx7<0x161>; defm V_LSHR_B64 : VOP3_Real_gfx6_gfx7<0x162>; defm V_ASHR_I64 : VOP3_Real_gfx6_gfx7<0x163>; defm V_MUL_LO_I32 : VOP3_Real_gfx6_gfx7<0x16b>; defm V_MAD_LEGACY_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x140>; defm V_MAD_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x141>; defm V_MAD_I32_I24 : VOP3_Real_gfx6_gfx7_gfx10<0x142>; defm V_MAD_U32_U24 : VOP3_Real_gfx6_gfx7_gfx10<0x143>; defm V_CUBEID_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x144>; defm V_CUBESC_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x145>; defm V_CUBETC_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x146>; defm V_CUBEMA_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x147>; defm V_BFE_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x148>; defm V_BFE_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x149>; defm V_BFI_B32 : VOP3_Real_gfx6_gfx7_gfx10<0x14a>; defm V_FMA_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x14b>; defm V_FMA_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x14c>; defm V_LERP_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x14d>; defm V_ALIGNBIT_B32 : VOP3_Real_gfx6_gfx7<0x14e>; defm V_ALIGNBYTE_B32 : VOP3_Real_gfx6_gfx7<0x14f>; defm V_MULLIT_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x150>; defm V_MIN3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x151>; defm V_MIN3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x152>; defm V_MIN3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x153>; defm V_MAX3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x154>; defm V_MAX3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x155>; defm V_MAX3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x156>; defm V_MED3_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x157>; defm V_MED3_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x158>; defm V_MED3_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x159>; defm V_SAD_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x15a>; defm V_SAD_HI_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x15b>; defm V_SAD_U16 : VOP3_Real_gfx6_gfx7_gfx10<0x15c>; defm V_SAD_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x15d>; defm V_CVT_PK_U8_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x15e>; defm V_DIV_FIXUP_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x15f>; defm V_DIV_FIXUP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x160>; defm V_ADD_F64 : 
VOP3_Real_gfx6_gfx7_gfx10<0x164>; defm V_MUL_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x165>; defm V_MIN_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x166>; defm V_MAX_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x167>; defm V_LDEXP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x168>; defm V_MUL_LO_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x169>; defm V_MUL_HI_U32 : VOP3_Real_gfx6_gfx7_gfx10<0x16a>; defm V_MUL_HI_I32 : VOP3_Real_gfx6_gfx7_gfx10<0x16c>; defm V_DIV_FMAS_F32 : VOP3_Real_gfx6_gfx7_gfx10<0x16f>; defm V_DIV_FMAS_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x170>; defm V_MSAD_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x171>; defm V_MQSAD_PK_U16_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x173>; defm V_TRIG_PREOP_F64 : VOP3_Real_gfx6_gfx7_gfx10<0x174>; defm V_DIV_SCALE_F32 : VOP3be_Real_gfx6_gfx7_gfx10<0x16d>; defm V_DIV_SCALE_F64 : VOP3be_Real_gfx6_gfx7_gfx10<0x16e>; // NB: Same opcode as v_mad_legacy_f32 let DecoderNamespace = "GFX10_B" in defm V_FMA_LEGACY_F32 : VOP3_Real_gfx10<0x140>; //===----------------------------------------------------------------------===// // GFX8, GFX9 (VI). //===----------------------------------------------------------------------===// let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in { multiclass VOP3_Real_vi op> { def _vi : VOP3_Real(NAME#"_e64"), SIEncodingFamily.VI>, VOP3e_vi (NAME#"_e64").Pfl>; } multiclass VOP3_Real_No_Suffix_vi op> { def _vi : VOP3_Real(NAME), SIEncodingFamily.VI>, VOP3e_vi (NAME).Pfl>; } multiclass VOP3be_Real_vi op> { def _vi : VOP3_Real(NAME#"_e64"), SIEncodingFamily.VI>, VOP3be_vi (NAME#"_e64").Pfl>; } multiclass VOP3OpSel_Real_gfx9 op> { def _vi : VOP3_Real(NAME#"_e64"), SIEncodingFamily.VI>, VOP3OpSel_gfx9 (NAME#"_e64").Pfl>; } multiclass VOP3OpSel_Real_gfx9_forced_opsel2 op> { def _vi : VOP3_Real(NAME#"_e64"), SIEncodingFamily.VI>, VOP3OpSel_gfx9 (NAME#"_e64").Pfl> { let Inst{13} = src2_modifiers{2}; // op_sel(2) } } multiclass VOP3Interp_Real_vi op> { def _vi : VOP3_Real(NAME), SIEncodingFamily.VI>, VOP3Interp_vi (NAME).Pfl>; } } // End AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in { multiclass VOP3_F16_Real_vi op> { def _vi : VOP3_Real(NAME#"_e64"), SIEncodingFamily.VI>, VOP3e_vi (NAME#"_e64").Pfl>; } multiclass VOP3Interp_F16_Real_vi op> { def _vi : VOP3_Real(NAME), SIEncodingFamily.VI>, VOP3Interp_vi (NAME).Pfl>; } } // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in { multiclass VOP3_F16_Real_gfx9 op, string OpName, string AsmName> { def _gfx9 : VOP3_Real(OpName#"_e64"), SIEncodingFamily.GFX9>, VOP3e_vi (OpName#"_e64").Pfl> { VOP3_Pseudo ps = !cast(OpName#"_e64"); let AsmString = AsmName # ps.AsmOperands; } } multiclass VOP3OpSel_F16_Real_gfx9 op, string AsmName> { def _gfx9 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX9>, VOP3OpSel_gfx9 (NAME#"_e64").Pfl> { VOP3_Pseudo ps = !cast(NAME#"_e64"); let AsmString = AsmName # ps.AsmOperands; } } multiclass VOP3Interp_F16_Real_gfx9 op, string OpName, string AsmName> { def _gfx9 : VOP3_Real(OpName), SIEncodingFamily.GFX9>, VOP3Interp_vi (OpName).Pfl> { VOP3_Pseudo ps = !cast(OpName); let AsmString = AsmName # ps.AsmOperands; } } multiclass VOP3_Real_gfx9 op, string AsmName> { def _gfx9 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX9>, VOP3e_vi (NAME#"_e64").Pfl> { VOP_Pseudo ps = !cast(NAME#"_e64"); let AsmString = AsmName # ps.AsmOperands; } } multiclass VOP3_Real_BITOP3_gfx9 op, string AsmName, bit isSingle = 0> { defvar ps = !cast(NAME#"_e64"); let IsSingle = !or(isSingle, 
ps.Pfl.IsSingle) in { def _gfx9 : VOP3_Real(NAME#"_e64"), SIEncodingFamily.GFX9>, VOP3e_vi (NAME#"_e64").Pfl> { let AsmString = AsmName # ps.AsmOperands; bits<8> bitop3; let Inst{60-59} = bitop3{7-6}; let Inst{10-8} = bitop3{5-3}; let Inst{63-61} = bitop3{2-0}; let Inst{11} = !if(ps.Pfl.HasOpSel, src0_modifiers{2}, 0); let Inst{12} = !if(ps.Pfl.HasOpSel, src1_modifiers{2}, 0); let Inst{13} = !if(ps.Pfl.HasOpSel, src2_modifiers{2}, 0); let Inst{14} = !if(ps.Pfl.HasOpSel, src0_modifiers{3}, 0); } } } // Instructions such as v_alignbyte_b32 allows op_sel in gfx9, but not in vi. // The following is created to support that. multiclass VOP3OpSel_Real_gfx9_with_name op, string opName, string AsmName> { defvar psName = opName#"_e64"; def _gfx9 : VOP3_Real(psName), SIEncodingFamily.VI>, // note: encoding family is VI VOP3OpSel_gfx9 (psName).Pfl> { VOP3_Pseudo ps = !cast(psName); let AsmString = AsmName # ps.AsmOperands; } } } // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" defm V_MAD_U64_U32 : VOP3be_Real_vi <0x1E8>; defm V_MAD_I64_I32 : VOP3be_Real_vi <0x1E9>; defm V_MAD_LEGACY_F32 : VOP3_Real_vi <0x1c0>; defm V_MAD_F32 : VOP3_Real_vi <0x1c1>; defm V_MAD_I32_I24 : VOP3_Real_vi <0x1c2>; defm V_MAD_U32_U24 : VOP3_Real_vi <0x1c3>; defm V_CUBEID_F32 : VOP3_Real_vi <0x1c4>; defm V_CUBESC_F32 : VOP3_Real_vi <0x1c5>; defm V_CUBETC_F32 : VOP3_Real_vi <0x1c6>; defm V_CUBEMA_F32 : VOP3_Real_vi <0x1c7>; defm V_BFE_U32 : VOP3_Real_vi <0x1c8>; defm V_BFE_I32 : VOP3_Real_vi <0x1c9>; defm V_BFI_B32 : VOP3_Real_vi <0x1ca>; defm V_FMA_F32 : VOP3_Real_vi <0x1cb>; defm V_FMA_F64 : VOP3_Real_vi <0x1cc>; defm V_LERP_U8 : VOP3_Real_vi <0x1cd>; let SubtargetPredicate = isGFX8Only in { defm V_ALIGNBIT_B32 : VOP3_Real_vi <0x1ce>; defm V_ALIGNBYTE_B32 : VOP3_Real_vi <0x1cf>; } defm V_MIN3_F32 : VOP3_Real_vi <0x1d0>; defm V_MIN3_I32 : VOP3_Real_vi <0x1d1>; defm V_MIN3_U32 : VOP3_Real_vi <0x1d2>; defm V_MAX3_F32 : VOP3_Real_vi <0x1d3>; defm V_MAX3_I32 : VOP3_Real_vi <0x1d4>; defm V_MAX3_U32 : VOP3_Real_vi <0x1d5>; defm V_MED3_F32 : VOP3_Real_vi <0x1d6>; defm V_MED3_I32 : VOP3_Real_vi <0x1d7>; defm V_MED3_U32 : VOP3_Real_vi <0x1d8>; defm V_SAD_U8 : VOP3_Real_vi <0x1d9>; defm V_SAD_HI_U8 : VOP3_Real_vi <0x1da>; defm V_SAD_U16 : VOP3_Real_vi <0x1db>; defm V_SAD_U32 : VOP3_Real_vi <0x1dc>; defm V_CVT_PK_U8_F32 : VOP3_Real_vi <0x1dd>; defm V_DIV_FIXUP_F32 : VOP3_Real_vi <0x1de>; defm V_DIV_FIXUP_F64 : VOP3_Real_vi <0x1df>; defm V_DIV_SCALE_F32 : VOP3be_Real_vi <0x1e0>; defm V_DIV_SCALE_F64 : VOP3be_Real_vi <0x1e1>; defm V_DIV_FMAS_F32 : VOP3_Real_vi <0x1e2>; defm V_DIV_FMAS_F64 : VOP3_Real_vi <0x1e3>; defm V_MSAD_U8 : VOP3_Real_vi <0x1e4>; defm V_QSAD_PK_U16_U8 : VOP3_Real_vi <0x1e5>; defm V_MQSAD_PK_U16_U8 : VOP3_Real_vi <0x1e6>; defm V_MQSAD_U32_U8 : VOP3_Real_vi <0x1e7>; defm V_PERM_B32 : VOP3_Real_vi <0x1ed>; defm V_MAD_F16 : VOP3_F16_Real_vi <0x1ea>; defm V_MAD_U16 : VOP3_F16_Real_vi <0x1eb>; defm V_MAD_I16 : VOP3_F16_Real_vi <0x1ec>; defm V_FMA_F16 : VOP3_F16_Real_vi <0x1ee>; defm V_DIV_FIXUP_F16 : VOP3_F16_Real_vi <0x1ef>; defm V_INTERP_P2_F16 : VOP3Interp_F16_Real_vi <0x276>; let FPDPRounding = 1 in { defm V_MAD_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ea, "V_MAD_F16", "v_mad_legacy_f16">; defm V_FMA_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ee, "V_FMA_F16", "v_fma_legacy_f16">; defm V_DIV_FIXUP_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ef, "V_DIV_FIXUP_F16", "v_div_fixup_legacy_f16">; defm V_INTERP_P2_LEGACY_F16 : VOP3Interp_F16_Real_gfx9 <0x276, "V_INTERP_P2_F16", "v_interp_p2_legacy_f16">; } // End FPDPRounding = 1 
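// Note: the v_*_legacy_* GFX9 forms (above and below) reuse the VI opcodes
// (0x1ea-0x1ef, 0x276) and take no op_sel; the op_sel-capable *_gfx9 variants
// further down are assigned separate opcodes (0x203-0x207, 0x277).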
defm V_MAD_LEGACY_U16 : VOP3_F16_Real_gfx9 <0x1eb, "V_MAD_U16", "v_mad_legacy_u16">;
defm V_MAD_LEGACY_I16 : VOP3_F16_Real_gfx9 <0x1ec, "V_MAD_I16", "v_mad_legacy_i16">;

defm V_ALIGNBIT_B32_opsel : VOP3OpSel_Real_gfx9_with_name <0x1ce, "V_ALIGNBIT_B32_opsel", "v_alignbit_b32">;
defm V_ALIGNBYTE_B32_opsel : VOP3OpSel_Real_gfx9_with_name <0x1cf, "V_ALIGNBYTE_B32_opsel", "v_alignbyte_b32">;

defm V_MAD_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x203, "v_mad_f16">;
defm V_MAD_U16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x204, "v_mad_u16">;
defm V_MAD_I16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x205, "v_mad_i16">;
defm V_FMA_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x206, "v_fma_f16">;
defm V_DIV_FIXUP_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x207, "v_div_fixup_f16">;
defm V_INTERP_P2_F16_gfx9 : VOP3Interp_F16_Real_gfx9 <0x277, "V_INTERP_P2_F16_gfx9", "v_interp_p2_f16">;

defm V_ADD_I32 : VOP3_Real_vi <0x29c>;
defm V_SUB_I32 : VOP3_Real_vi <0x29d>;

defm V_INTERP_P1_F32_e64 : VOP3Interp_Real_vi <0x270>;
defm V_INTERP_P2_F32_e64 : VOP3Interp_Real_vi <0x271>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_vi <0x272>;
defm V_INTERP_P1LL_F16 : VOP3Interp_Real_vi <0x274>;
defm V_INTERP_P1LV_F16 : VOP3Interp_Real_vi <0x275>;

defm V_ADD_F64 : VOP3_Real_vi <0x280>;
defm V_MUL_F64 : VOP3_Real_vi <0x281>;
defm V_MIN_F64 : VOP3_Real_vi <0x282>;
defm V_MAX_F64 : VOP3_Real_vi <0x283>;
defm V_LDEXP_F64 : VOP3_Real_vi <0x284>;
defm V_MUL_LO_U32 : VOP3_Real_vi <0x285>;
// removed from VI as identical to V_MUL_LO_U32
let isAsmParserOnly = 1 in {
defm V_MUL_LO_I32 : VOP3_Real_vi <0x285>;
}
defm V_MUL_HI_U32 : VOP3_Real_vi <0x286>;
defm V_MUL_HI_I32 : VOP3_Real_vi <0x287>;

defm V_READLANE_B32 : VOP3_Real_No_Suffix_vi <0x289>;
defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_vi <0x28a>;

defm V_LSHLREV_B64 : VOP3_Real_vi <0x28f>;
defm V_LSHRREV_B64 : VOP3_Real_vi <0x290>;
defm V_ASHRREV_I64 : VOP3_Real_vi <0x291>;
defm V_TRIG_PREOP_F64 : VOP3_Real_vi <0x292>;

defm V_LSHL_ADD_U32 : VOP3_Real_vi <0x1fd>;
defm V_ADD_LSHL_U32 : VOP3_Real_vi <0x1fe>;
defm V_ADD3_U32 : VOP3_Real_vi <0x1ff>;
defm V_LSHL_OR_B32 : VOP3_Real_vi <0x200>;
defm V_AND_OR_B32 : VOP3_Real_vi <0x201>;
defm V_OR3_B32 : VOP3_Real_vi <0x202>;
defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx9 <0x2a0>;

defm V_XAD_U32 : VOP3_Real_vi <0x1f3>;

defm V_MIN3_F16 : VOP3OpSel_Real_gfx9 <0x1f4>;
defm V_MIN3_I16 : VOP3OpSel_Real_gfx9 <0x1f5>;
defm V_MIN3_U16 : VOP3OpSel_Real_gfx9 <0x1f6>;
defm V_MAX3_F16 : VOP3OpSel_Real_gfx9 <0x1f7>;
defm V_MAX3_I16 : VOP3OpSel_Real_gfx9 <0x1f8>;
defm V_MAX3_U16 : VOP3OpSel_Real_gfx9 <0x1f9>;
defm V_MED3_F16 : VOP3OpSel_Real_gfx9 <0x1fa>;
defm V_MED3_I16 : VOP3OpSel_Real_gfx9 <0x1fb>;
defm V_MED3_U16 : VOP3OpSel_Real_gfx9 <0x1fc>;

defm V_ADD_I16 : VOP3OpSel_Real_gfx9 <0x29e>;
defm V_SUB_I16 : VOP3OpSel_Real_gfx9 <0x29f>;

defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx9 <0x1f1>;
defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx9 <0x1f2>;

defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx9 <0x299>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx9 <0x29a>;

defm V_LSHL_ADD_U64 : VOP3_Real_vi <0x208>;

defm V_CVT_PK_FP8_F32 : VOP3OpSel_Real_gfx9 <0x2a2>;
defm V_CVT_PK_BF8_F32 : VOP3OpSel_Real_gfx9 <0x2a3>;
defm V_CVT_PK_BF16_F32 : VOP3OpSel_Real_gfx9 <0x268>;
defm V_CVT_SR_FP8_F32 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x2a4>;
defm V_CVT_SR_BF8_F32 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x2a5>;

defm V_MINIMUM3_F32 : VOP3_Real_vi <0x2a8>;
defm V_MAXIMUM3_F32 : VOP3_Real_vi <0x2a9>;

defm V_BITOP3_B16 : VOP3_Real_BITOP3_gfx9<0x233, "v_bitop3_b16">;
defm V_BITOP3_B32 : VOP3_Real_BITOP3_gfx9<0x234, "v_bitop3_b32">;
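// Note on v_bitop3_b16/v_bitop3_b32: the 8-bit bitop3 immediate is assumed
// here to act as a truth table selecting an arbitrary bitwise function of the
// three source operands; VOP3_Real_BITOP3_gfx9 above scatters that value into
// Inst{60-59}, Inst{10-8} and Inst{63-61}.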
let OtherPredicates = [HasFP8ConversionScaleInsts] in {
defm V_CVT_SCALEF32_SR_FP8_BF16 : VOP3OpSel_Real_gfx9<0x246>;
defm V_CVT_SCALEF32_SR_FP8_F16 : VOP3OpSel_Real_gfx9<0x242>;
defm V_CVT_SCALEF32_SR_FP8_F32 : VOP3OpSel_Real_gfx9<0x237>;
defm V_CVT_SCALEF32_F16_FP8 : VOP3OpSel_Real_gfx9 <0x24a>;
defm V_CVT_SCALEF32_F32_FP8 : VOP3OpSel_Real_gfx9 <0x23b>;
defm V_CVT_SCALEF32_PK_FP8_F32 : VOP3OpSel_Real_gfx9 <0x235>;
defm V_CVT_SCALEF32_PK_F32_FP8 : VOP3OpSel_Real_gfx9 <0x239>;
defm V_CVT_SCALEF32_PK_FP8_F16 : VOP3OpSel_Real_gfx9 <0x240>;
defm V_CVT_SCALEF32_PK_FP8_BF16 : VOP3OpSel_Real_gfx9 <0x244>;
defm V_CVT_SCALEF32_PK_F16_FP8 : VOP3OpSel_Real_gfx9<0x248>;
defm V_CVT_SCALEF32_PK_BF16_FP8 : VOP3OpSel_Real_gfx9<0x269>;
}

let OtherPredicates = [HasBF8ConversionScaleInsts] in {
defm V_CVT_SCALEF32_SR_BF8_BF16 : VOP3OpSel_Real_gfx9<0x247>;
defm V_CVT_SCALEF32_SR_BF8_F16 : VOP3OpSel_Real_gfx9<0x243>;
defm V_CVT_SCALEF32_SR_BF8_F32 : VOP3OpSel_Real_gfx9<0x238>;
defm V_CVT_SCALEF32_F16_BF8 : VOP3OpSel_Real_gfx9 <0x24b>;
defm V_CVT_SCALEF32_F32_BF8 : VOP3OpSel_Real_gfx9 <0x23c>;
defm V_CVT_SCALEF32_PK_BF8_F32 : VOP3OpSel_Real_gfx9 <0x236>;
defm V_CVT_SCALEF32_PK_F32_BF8 : VOP3OpSel_Real_gfx9 <0x23a>;
defm V_CVT_SCALEF32_PK_BF8_F16 : VOP3OpSel_Real_gfx9 <0x241>;
defm V_CVT_SCALEF32_PK_BF8_BF16 : VOP3OpSel_Real_gfx9 <0x245>;
defm V_CVT_SCALEF32_PK_F16_BF8 : VOP3OpSel_Real_gfx9<0x249>;
defm V_CVT_SCALEF32_PK_BF16_BF8 : VOP3OpSel_Real_gfx9<0x26a>;
}

let OtherPredicates = [HasFP4ConversionScaleInsts] in {
defm V_CVT_SCALEF32_PK_F32_FP4 : VOP3OpSel_Real_gfx9 <0x23f>;
defm V_CVT_SCALEF32_PK_FP4_F32 : VOP3OpSel_Real_gfx9 <0x23d>;
defm V_CVT_SCALEF32_PK_F16_FP4 : VOP3OpSel_Real_gfx9 <0x250>;
defm V_CVT_SCALEF32_PK_BF16_FP4 : VOP3OpSel_Real_gfx9 <0x251>;
defm V_CVT_SCALEF32_PK_FP4_F16 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x24c>;
defm V_CVT_SCALEF32_PK_FP4_BF16 : VOP3OpSel_Real_gfx9_forced_opsel2 <0x24d>;
defm V_CVT_SCALEF32_SR_PK_FP4_F16 : VOP3OpSel_Real_gfx9 <0x24e>;
defm V_CVT_SCALEF32_SR_PK_FP4_BF16 : VOP3OpSel_Real_gfx9 <0x24f>;
defm V_CVT_SCALEF32_SR_PK_FP4_F32 : VOP3OpSel_Real_gfx9 <0x23e>;
}

let OtherPredicates = [HasFP6BF6ConversionScaleInsts] in {
defm V_CVT_SCALEF32_PK32_F32_FP6 : VOP3_Real_gfx9<0x256, "v_cvt_scalef32_pk32_f32_fp6">;
defm V_CVT_SCALEF32_PK32_F32_BF6 : VOP3_Real_gfx9<0x257, "v_cvt_scalef32_pk32_f32_bf6">;
defm V_CVT_SCALEF32_PK32_F16_FP6 : VOP3_Real_gfx9<0x260, "v_cvt_scalef32_pk32_f16_fp6">;
defm V_CVT_SCALEF32_PK32_BF16_FP6 : VOP3_Real_gfx9<0x261, "v_cvt_scalef32_pk32_bf16_fp6">;
defm V_CVT_SCALEF32_PK32_F16_BF6 : VOP3_Real_gfx9<0x262, "v_cvt_scalef32_pk32_f16_bf6">;
defm V_CVT_SCALEF32_PK32_BF16_BF6 : VOP3_Real_gfx9<0x263, "v_cvt_scalef32_pk32_bf16_bf6">;
}

let OtherPredicates = [HasF16BF16ToFP6BF6ConversionScaleInsts] in {
defm V_CVT_SCALEF32_PK32_FP6_F16 : VOP3_Real_gfx9<0x258, "v_cvt_scalef32_pk32_fp6_f16">;
defm V_CVT_SCALEF32_PK32_FP6_BF16 : VOP3_Real_gfx9<0x259, "v_cvt_scalef32_pk32_fp6_bf16">;
defm V_CVT_SCALEF32_PK32_BF6_F16 : VOP3_Real_gfx9<0x25a, "v_cvt_scalef32_pk32_bf6_f16">;
defm V_CVT_SCALEF32_PK32_BF6_BF16 : VOP3_Real_gfx9<0x25b, "v_cvt_scalef32_pk32_bf6_bf16">;
defm V_CVT_SCALEF32_SR_PK32_BF6_BF16 : VOP3_Real_gfx9<0x25f, "v_cvt_scalef32_sr_pk32_bf6_bf16">;
defm V_CVT_SCALEF32_SR_PK32_BF6_F16 : VOP3_Real_gfx9<0x25e, "v_cvt_scalef32_sr_pk32_bf6_f16">;
defm V_CVT_SCALEF32_SR_PK32_BF6_F32 : VOP3_Real_gfx9<0x255, "v_cvt_scalef32_sr_pk32_bf6_f32">;
defm V_CVT_SCALEF32_SR_PK32_FP6_BF16 : VOP3_Real_gfx9<0x25d, "v_cvt_scalef32_sr_pk32_fp6_bf16">;
defm V_CVT_SCALEF32_SR_PK32_FP6_F16 : VOP3_Real_gfx9<0x25c, "v_cvt_scalef32_sr_pk32_fp6_f16">;
defm V_CVT_SCALEF32_SR_PK32_FP6_F32 : VOP3_Real_gfx9<0x254, "v_cvt_scalef32_sr_pk32_fp6_f32">;
}

let OtherPredicates = [HasF32ToF16BF16ConversionSRInsts] in {
defm V_CVT_SR_F16_F32 : VOP3OpSel_Real_gfx9 <0x2a6>;
defm V_CVT_SR_BF16_F32 : VOP3OpSel_Real_gfx9 <0x2a7>;
}

defm V_ASHR_PK_I8_I32 : VOP3OpSel_Real_gfx9 <0x265>;
defm V_ASHR_PK_U8_I32 : VOP3OpSel_Real_gfx9 <0x266>;

let OtherPredicates = [HasCvtPkF16F32Inst] in {
defm V_CVT_PK_F16_F32 : VOP3_Real_gfx9<0x267, "v_cvt_pk_f16_f32">;
}

defm V_CVT_SCALEF32_2XPK16_FP6_F32 : VOP3_Real_gfx9<0x252, "v_cvt_scalef32_2xpk16_fp6_f32">;
defm V_CVT_SCALEF32_2XPK16_BF6_F32 : VOP3_Real_gfx9<0x253, "v_cvt_scalef32_2xpk16_bf6_f32">;