//==- RISCVSchedSiFiveP600.td - SiFiveP600 Scheduling Defs ---*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

/// c is true if mx has the worst case behavior compared to LMULs in MxList.
/// On the SiFiveP600, the worst case LMUL is the Largest LMUL
/// and the worst case sew is the smallest SEW for that LMUL.
class SiFiveP600IsWorstCaseMX<string mx, list<string> MxList> {
  string LLMUL = LargestLMUL<MxList>.r;
  bit c = !eq(mx, LLMUL);
}

class SiFiveP600IsWorstCaseMXSEW<string mx, int sew, list<string> MxList,
                                 bit isF = 0> {
  string LLMUL = LargestLMUL<MxList>.r;
  int SSEW = SmallestSEW<LLMUL, isF>.r;
  bit c = !and(!eq(mx, LLMUL), !eq(sew, SSEW));
}

defvar SiFiveP600VLEN = 128;

// 1 Micro-Op per cycle.
class SiFiveP600GetLMulCycles<string mx> {
  int c = !cond(
    !eq(mx, "M1") : 1,
    !eq(mx, "M2") : 2,
    !eq(mx, "M4") : 4,
    !eq(mx, "M8") : 8,
    !eq(mx, "MF2") : 1,
    !eq(mx, "MF4") : 1,
    !eq(mx, "MF8") : 1
  );
}

// VLMAX (number of elements) for a given LMUL/SEW pair at VLEN=128.
class SiFiveP600GetVLMAX<string mx, int sew> {
  defvar LMUL = SiFiveP600GetLMulCycles<mx>.c;
  int val = !cond(
    !eq(mx, "MF2") : !div(!div(SiFiveP600VLEN, 2), sew),
    !eq(mx, "MF4") : !div(!div(SiFiveP600VLEN, 4), sew),
    !eq(mx, "MF8") : !div(!div(SiFiveP600VLEN, 8), sew),
    true: !div(!mul(SiFiveP600VLEN, LMUL), sew)
  );
}

// Latency of strided/indexed loads and stores, keyed on the number of
// elements processed (VLMAX).
class SiFiveP600StridedLdStLatency<string mx, int sew> {
  // Fix: this is the P600 model, so use the P600 VLMAX helper defined above
  // (was SiFiveP400GetVLMAX) so the latency tracks SiFiveP600VLEN.
  defvar VL = SiFiveP600GetVLMAX<mx, sew>.val;
  int val = !cond(
    !eq(VL, 2): 13,
    !eq(VL, 4): 18,
    !eq(VL, 8): 22,
    !eq(VL, 16): 30,
    // VL=32,64,128
    true: !sub(VL, 2)
  );
}

// Latency for segmented loads and stores are calculated as vl * nf.
// Occupancy of a segmented memory op: VLMAX elements times NF fields.
class SiFiveP600SegmentedLdStCycles<string mx, int nf, int eew> {
  int c = !mul(SiFiveP600GetVLMAX<mx, eew>.val, nf);
}

class SiFiveP600VSM3CCycles<string mx> {
  // c = ceil(LMUL / 2)
  int c = !cond(!eq(mx, "M2") : 1,
                !eq(mx, "M4") : 2,
                !eq(mx, "M8") : 4,
                true : 1);
}

// Some ops take an extra pass at M8 (multiplier applied to their latency).
class SiFiveP600RVVMultiplier<string mx> {
  int c = !if(!eq(mx, "M8"), 2, 1);
}

// ======================================================================
// The latency and occupancy data in this section are primarily evaluated
// from llvm-exegesis.
// ======================================================================

class SiFiveP600VCryptoLatency<string mx> {
  int c = !cond(
    !eq(mx, "M4"): 4,
    !eq(mx, "M8"): 8,
    true: 2
  );
}

class SiFiveP600VFMinMaxReduction<string mx, int sew> {
  defvar E64Lat = !cond(
    !eq(mx, "M1") : 4,
    !eq(mx, "M2") : 6,
    !eq(mx, "M4") : 8,
    !eq(mx, "M8") : 10,
    true: 2
  );
  defvar E64Cycles = !cond(
    !eq(mx, "M1") : 3,
    !eq(mx, "M2") : 4,
    !eq(mx, "M4") : 5,
    !eq(mx, "M8") : 6,
    true: 2
  );
  // Narrower SEWs take slightly longer than the E64 baseline.
  int latency = !if(!eq(sew, 64), E64Lat, !add(E64Lat, 2));
  int cycles = !if(!eq(sew, 64), E64Cycles, !add(E64Cycles, 1));
}

class SiFiveP600VFUnorderedReduction<string mx, int sew> {
  defvar E64Lat = !cond(
    !eq(mx, "M1") : 6,
    !eq(mx, "M2") : 12,
    !eq(mx, "M4") : 18,
    !eq(mx, "M8") : 24,
    true: 2
  );
  defvar E32Cycles = !cond(
    !eq(mx, "M1") : 10,
    !eq(mx, "M2") : 10,
    !eq(mx, "M4") : 11,
    !eq(mx, "M8") : 11,
    true: 6
  );
  int latency = !if(!eq(sew, 64), E64Lat, !add(E64Lat, 4));
  int cycles = !if(!eq(sew, 64), 6, E32Cycles);
}

class SiFiveP600VFWidenUnorderedReduction<string mx> {
  int latency = !cond(
    !eq(mx, "M1") : 10,
    !eq(mx, "M2") : 18,
    !eq(mx, "M4") : 24,
    !eq(mx, "M8") : 30,
    true: 6
  );
}

// Ordered FP reductions scale linearly with LMUL.
class SiFiveP600VFOrderedReduction<string mx, int sew> {
  defvar Base = !if(!eq(sew, 64), 6, 10);
  int c = !cond(
    !eq(mx, "M1") : Base,
    !eq(mx, "M2") : !mul(Base, 2),
    !eq(mx, "M4") : !mul(Base, 4),
    !eq(mx, "M8") : !mul(Base, 8),
    true: 6
  );
}

class SiFiveP600VIReductionLatency<string mx> {
  int c = !cond(
    !eq(mx, "M2") : 4,
    !eq(mx, "M4") : 8,
    !eq(mx, "M8") : 16,
    // M1 and lower
    true: 2
  );
}

class SiFiveP600VIMinMaxReductionLatency<string mx, int sew> {
  // +-----+-----+-----+-----+----+
  // |     | E64 | E32 | E16 | E8 |
  // +-----+-----+-----+-----+----+
  // | MF8 |  X  |  X  |  X  |  4 |
  // +-----+-----+-----+-----+----+
  // | MF4 |  X  |  X  |  4  |  6 |
  // +-----+-----+-----+-----+----+
  // | MF2 |  X  |  4  |  6  |  8 |
  // +-----+-----+-----+-----+----+
  // | M1  |  4  |  6  |  8  | 10 |
  // +-----+-----+-----+-----+----+
  // | M2  |  6  |  8  | 10  | 12 |
  // +-----+-----+-----+-----+----+
  // | M4  |  8  | 10  | 12  | 14 |
  // +-----+-----+-----+-----+----+
  // | M8  | 10  | 12  | 14  | 16 |
  // +-----+-----+-----+-----+----+
  defvar BaseIndex = !cond(
    !eq(sew, 64): 0,
    !eq(sew, 32): 1,
    !eq(sew, 16): 2,
    !eq(sew, 8): 3
  );
  defvar Latencies = [4, 6, 8, 10, 12, 14, 16];
  int c = !cond(
    !eq(mx, "M1") : Latencies[BaseIndex],
    !eq(mx, "M2") : Latencies[!add(BaseIndex, 1)],
    !eq(mx, "M4") : Latencies[!add(BaseIndex, 2)],
    !eq(mx, "M8") : Latencies[!add(BaseIndex, 3)],
    // Fractional
    !eq(mx, "MF2"): Latencies[!sub(BaseIndex, 1)],
    !eq(mx, "MF4"): Latencies[!sub(BaseIndex, 2)],
    !eq(mx, "MF8"): Latencies[!sub(BaseIndex, 3)]
  );
}

class SiFiveP600VIMinMaxReductionCycles<string mx, int sew> {
  // +-----+-----+-----+-----+----+
  // |     | E64 | E32 | E16 | E8 |
  // +-----+-----+-----+-----+----+
  // | MF8 |  X  |  X  |  X  |  3 |
  // +-----+-----+-----+-----+----+
  // | MF4 |  X  |  X  |  3  |  5 |
  // +-----+-----+-----+-----+----+
  // | MF2 |  X  |  3  |  5  |  6 |
  // +-----+-----+-----+-----+----+
  // | M1  |  3  |  4  |  6  |  8 |
  // +-----+-----+-----+-----+----+
  // | M2  |  4  |  5  |  8  |  9 |
  // +-----+-----+-----+-----+----+
  // | M4  |  5  |  6  | 10  | 11 |
  // +-----+-----+-----+-----+----+
  // | M8  |  7  |  8  |  9  | 11 |
  // +-----+-----+-----+-----+----+
  defvar Index = !cond(
    !eq(sew, 64): 0,
    !eq(sew, 32): 1,
    !eq(sew, 16): 2,
    !eq(sew, 8): 3
  );
  defvar Cycles = [
    [0, 0, 0, 3],
    [0, 0, 3, 5],
    [0, 3, 5, 6],
    [3, 4, 6, 8],
    [4, 5, 8, 9],
    [5, 6, 10, 11],
    [7, 8, 9, 11]
  ];
  int c = !cond(
    !eq(mx, "MF8"): Cycles[0][Index],
    !eq(mx, "MF4"): Cycles[1][Index],
    !eq(mx, "MF2"): Cycles[2][Index],
    !eq(mx, "M1"): Cycles[3][Index],
    !eq(mx, "M2"): Cycles[4][Index],
    !eq(mx, "M4"): Cycles[5][Index],
    !eq(mx, "M8"): Cycles[6][Index]
  );
}

class SiFiveP600VSlide1<string mx> {
  int c = !cond(
    !eq(mx, "M2") : 3,
    !eq(mx, "M4") : 4,
    !eq(mx, "M8") : 8,
    // M1 and lower
    true: 2
  );
}

class SiFiveP600VSlideI<string mx> {
  int c = !cond(
    !eq(mx, "M2") : 4,
    !eq(mx, "M4") : 6,
    !eq(mx, "M8") : 8,
    // M1 and lower
    true: 2
  );
}

// Register-operand slides at LMUL >= 2; up-slides retire slightly faster.
class SiFiveP600VSlideXComplex<string mx, bit isUp> {
  int latency = !cond(
    !eq(mx, "M2") : 11,
    !eq(mx, "M4") : 14,
    !eq(mx, "M8") : 20
  );
  int cycles = !cond(
    !eq(mx, "M2") : !if(isUp, 10, 11),
    !eq(mx, "M4") : !if(isUp, 12, 14),
    !eq(mx, "M8") : !if(isUp, 16, 20)
  );
}

class SiFiveP600VPermutationComplex<string mx> {
  int c = !cond(
    !eq(mx, "M2") : 12,
    !eq(mx, "M4") : 16,
    !eq(mx, "M8") : 24
  );
}

class SiFiveP600VSHA2MSCycles<string mx, int sew> {
  int c = !cond(
    !eq(mx, "M2") : !if(!eq(sew, 32), 2, 3),
    !eq(mx, "M4") : !if(!eq(sew, 32), 4, 6),
    !eq(mx, "M8") : !if(!eq(sew, 32), 8, 12),
    true: 1
  );
}

// SiFiveP600 machine model for scheduling and other instruction cost heuristics.
def SiFiveP600Model : SchedMachineModel {
  let IssueWidth = 4;         // 4 micro-ops are dispatched per cycle.
  let MicroOpBufferSize = 192; // Max micro-ops that can be buffered.
  let LoadLatency = 4;        // Cycles for loads to access the cache.
  let MispredictPenalty = 9;  // Extra cycles for a mispredicted branch.
  let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
                             HasStdExtZknd, HasStdExtZkne, HasStdExtZknh,
                             HasStdExtZksed, HasStdExtZksh, HasStdExtZkr,
                             HasVendorXSfvqmaccqoq];
  let CompleteModel = false;
}

let SchedModel = SiFiveP600Model in {

def SiFiveP600IEXQ0 : ProcResource<1>;
def SiFiveP600IEXQ1 : ProcResource<1>;
def SiFiveP600IEXQ2 : ProcResource<1>;
def SiFiveP600IEXQ3 : ProcResource<1>;
def SiFiveP600FEXQ0 : ProcResource<1>;
def SiFiveP600FEXQ1 : ProcResource<1>;

// Two Load/Store ports that can issue either two loads, two stores, or one load
// and one store (P550 has one load and one separate store pipe).
def SiFiveP600LDST : ProcResource<2>;

// 4-wide pipeline with 4 ALU pipes.
def SiFiveP600IntArith : ProcResGroup<[SiFiveP600IEXQ0, SiFiveP600IEXQ1, SiFiveP600IEXQ2, SiFiveP600IEXQ3]>;
// Aliases binding specific scalar functionality to a single issue queue.
defvar SiFiveP600SYS = SiFiveP600IEXQ0;
defvar SiFiveP600CMOV = SiFiveP600IEXQ0;
defvar SiFiveP600MulI2F = SiFiveP600IEXQ1;
def SiFiveP600Branch : ProcResGroup<[SiFiveP600IEXQ2, SiFiveP600IEXQ3]>;
def SiFiveP600Div : ProcResource<1>;
def SiFiveP600FloatArith : ProcResGroup<[SiFiveP600FEXQ0, SiFiveP600FEXQ1]>;
defvar SiFiveP600F2I = SiFiveP600FEXQ0;
def SiFiveP600FloatDiv : ProcResource<1>;
// Vector pipeline
// VEXQ0 handle Mask, Simple Slide instructions,
// VEXQ1 handle Complex Slide, Permutation, Reductions, Divide instructions.
// Other vector instructions can be done in VEXQ0 and VEXQ1.
def SiFiveP600VEXQ0 : ProcResource<1>;
def SiFiveP600VEXQ1 : ProcResource<1>;
def SiFiveP600VectorArith : ProcResGroup<[SiFiveP600VEXQ0, SiFiveP600VEXQ1]>;
// Only VEXQ0 has mask unit.
defvar SiFiveP600VectorMask = SiFiveP600VEXQ0;
// Only VEXQ0 has vector crypto.
defvar SiFiveP600VectorCrypto = SiFiveP600VEXQ0;
def SiFiveP600VLD : ProcResource<1>;
def SiFiveP600VST : ProcResource<1>;
def SiFiveP600VDiv : ProcResource<1>;
def SiFiveP600VFloatDiv : ProcResource<1>;

// NOTE(review): throughout this section the `<SchedWrite, [ProcRes]>`
// template arguments of the `def : WriteRes` records appear to have been
// stripped during a text conversion (`def : WriteRes;` is not valid
// TableGen on its own). The latencies and ReleaseAtCycles below are intact;
// the SchedWrite/resource bindings must be restored from upstream —
// TODO confirm against llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td.

// Integer arithmetic and logic
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
// Branching
def : WriteRes;
def : WriteRes;
def : WriteRes;
// CMOV
// Conditional move occupies both a branch pipe and the CMOV pipe for 2 uops.
def P600WriteCMOV : SchedWriteRes<[SiFiveP600Branch, SiFiveP600CMOV]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
def : InstRW<[P600WriteCMOV], (instrs PseudoCCMOVGPRNoX0)>;

let Latency = 2 in {
// Integer multiplication
def : WriteRes;
def : WriteRes;
// cpop[w] look exactly like multiply.
def : WriteRes;
def : WriteRes;
}

// Integer division
// Division is unpipelined: it blocks the divider for nearly its whole latency.
def : WriteRes {
  let Latency = 35;
  let ReleaseAtCycles = [1, 34];
}
def : WriteRes {
  let Latency = 20;
  let ReleaseAtCycles = [1, 19];
}
// Integer remainder
def : WriteRes {
  let Latency = 35;
  let ReleaseAtCycles = [1, 34];
}
def : WriteRes {
  let Latency = 20;
  let ReleaseAtCycles = [1, 19];
}

// Bitmanip
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;

// Memory
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
let Latency = 4 in {
def : WriteRes;
def : WriteRes;
}
let Latency = 4 in {
def : WriteRes;
def : WriteRes;
}
let Latency = 5 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
}

// Atomic memory
let Latency = 3 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
}

// Floating point
let Latency = 2 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
}
let Latency = 3 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
}
let Latency = 4 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
}
let Latency = 2 in {
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
def : WriteRes;
}

// Half precision.
// FP divide/sqrt are unpipelined; they hold the divider via ReleaseAtCycles.
def : WriteRes {
  let Latency = 4;
  let ReleaseAtCycles = [1, 4];
}
def : WriteRes {
  let Latency = 18;
  let ReleaseAtCycles = [1, 17];
}

// Single precision.
def : WriteRes { let Latency = 6; let ReleaseAtCycles = [1, 6]; } def : WriteRes { let Latency = 18; let ReleaseAtCycles = [1, 17]; } // Double precision def : WriteRes { let Latency = 11; let ReleaseAtCycles = [1, 11]; } def : WriteRes { let Latency = 33; let ReleaseAtCycles = [1, 32]; } // Conversions let Latency = 2 in { def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; def : WriteRes; } // 6. Configuration-Setting Instructions def : WriteRes; def : WriteRes; def : WriteRes; // 7. Vector Loads and Stores // Note that the latency of vector loads are measured by consuming the loaded // value with vmv.x.s before subtracting the latency of vmv.x.s from the number. foreach mx = SchedMxList in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 8 in { let ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVLDE", [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLDFF", [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP600VST], mx, IsWorstCase>; } // Mask load and store have a maximum EMUL of 1. 
let ReleaseAtCycles = [SiFiveP600GetLMulCycles<"M1">.c] in { defm "" : LMULWriteResMX<"WriteVLDM", [SiFiveP600VLD], mx, IsWorstCase=!eq(mx,"M1")>; defm "" : LMULWriteResMX<"WriteVSTM", [SiFiveP600VST], mx, IsWorstCase=!eq(mx,"M1")>; } } foreach eew = [8, 16, 32, 64] in { let Latency = SiFiveP600StridedLdStLatency.val, ReleaseAtCycles = [SiFiveP600GetVLMAX.val] in { defm "" : LMULWriteResMX<"WriteVLDS" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLDUX" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLDOX" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSTS" # eew, [SiFiveP600VST], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSTUX" # eew, [SiFiveP600VST], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSTOX" # eew, [SiFiveP600VST], mx, IsWorstCase>; } } } foreach mx = SchedMxList in { foreach nf=2-8 in { foreach eew = [8, 16, 32, 64] in { defvar LMulLat = SiFiveP600SegmentedLdStCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = !add(12, LMulLat), ReleaseAtCycles = [!add(12, LMulLat)] in { defm "" : LMULWriteResMX<"WriteVLSEG" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLSEGFF" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLSSEG" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLUXSEG" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVLOXSEG" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>; } let Latency = !add(1, LMulLat), ReleaseAtCycles = [!add(12, LMulLat)] in { defm "" : LMULWriteResMX<"WriteVSSEG" # nf # "e" # eew, [SiFiveP600VST], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSSSEG" # nf # "e" # eew, [SiFiveP600VST], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSUXSEG" # nf # "e" # eew, [SiFiveP600VST], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSOXSEG" # nf # "e" # eew, 
[SiFiveP600VST], mx, IsWorstCase>; } } } } // Whole register move/load/store foreach LMul = [1, 2, 4, 8] in { let Latency = 8, ReleaseAtCycles = [LMul] in { def : WriteRes("WriteVLD" # LMul # "R"), [SiFiveP600VLD]>; def : WriteRes("WriteVST" # LMul # "R"), [SiFiveP600VST]>; } let Latency = 2, ReleaseAtCycles = [LMul] in { def : WriteRes("WriteVMov" # LMul # "V"), [SiFiveP600VectorArith]>; } } // 11. Vector Integer Arithmetic Instructions foreach mx = SchedMxList in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 2, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVExtV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICmpV", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICmpX", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICmpI", [SiFiveP600VectorMask], mx, IsWorstCase>; } let ReleaseAtCycles = [LMulLat] in { let Latency = 6 in { defm "" : LMULWriteResMX<"WriteVIMulV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMulX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMulAddV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMulAddX", [SiFiveP600VectorArith], mx, IsWorstCase>; } let Latency = !mul(2, SiFiveP600RVVMultiplier.c) in { defm "" : LMULWriteResMX<"WriteVIALUV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIALUX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIALUI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICALUV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICALUX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICALUI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICALUMV", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : 
LMULWriteResMX<"WriteVICALUMX", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVICALUMI", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMergeV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMergeX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMergeI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMovX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMovI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVShiftI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVShiftV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVShiftX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMinMaxV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMinMaxX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIMovV", [SiFiveP600VectorArith], mx, IsWorstCase>; } } } // Widening foreach mx = SchedMxListW in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 6, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVIWALUV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWALUX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWALUI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWMulV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWMulX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWMulAddV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVIWMulAddX", [SiFiveP600VectorArith], mx, IsWorstCase>; // Special case for variants with widen operands. 
let ReleaseAtCycles = [!mul(LMulLat, 2)] in def P600WriteVIWALUWidenOp_ # mx : SchedWriteRes<[SiFiveP600VectorArith]>; } defvar P600VIWALUBaseSchedRW = [!cast("P600WriteVIWALUWidenOp_" # mx), !cast("ReadVPassthru_" # mx), !cast("ReadVIALUV_" # mx), !cast("ReadVIALUV_" # mx)]; def : InstRW; def : InstRW; def : InstRW("ReadVMask")]), (instregex "^PseudoVW(ADD|SUB)[U]?_W(V|X)_" # mx # "_MASK$")>; def : InstRW("ReadVMask")]), (instregex "^PseudoVW(ADD|SUB)[U]?_WV_" # mx # "_MASK_TIED$")>; } // Worst case needs 51/45/42/72 * lmul cycles for i8/16/32/64. foreach mx = SchedMxList in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; defvar DivMicroOpLat = !cond(!eq(sew, 8): 51, !eq(sew, 16): 45, !eq(sew, 32): 42, /* SEW=64 */ true: 72); defvar DivLatency = !mul(DivMicroOpLat, LMulLat); let Latency = DivLatency, ReleaseAtCycles = [LMulLat, DivLatency] in { defm "" : LMULSEWWriteResMXSEW<"WriteVIDivV", [SiFiveP600VEXQ1, SiFiveP600VDiv], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVIDivX", [SiFiveP600VEXQ1, SiFiveP600VDiv], mx, sew, IsWorstCase>; } } } // Narrowing Shift and Clips foreach mx = SchedMxListW in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 2, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVNShiftV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVNShiftX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVNShiftI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVNClipV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVNClipX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVNClipI", [SiFiveP600VectorArith], mx, IsWorstCase>; } } // 12. 
Vector Fixed-Point Arithmetic Instructions foreach mx = SchedMxList in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 6, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVSALUV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSALUX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSALUI", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVAALUV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVAALUX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSMulV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSMulX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSShiftV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSShiftX", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVSShiftI", [SiFiveP600VectorArith], mx, IsWorstCase>; } } // 13. 
Vector Floating-Point Instructions foreach mx = SchedMxListF in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; let Latency = 6, ReleaseAtCycles = [LMulLat] in { defm "" : LMULSEWWriteResMXSEW<"WriteVFALUV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFALUF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMulV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMulF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMulAddV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMulAddF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; } } } foreach mx = SchedMxListF in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; let Latency = !mul(2, SiFiveP600RVVMultiplier.c), ReleaseAtCycles = [LMulLat] in { defm "" : LMULSEWWriteResMXSEW<"WriteVFRecpV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFSgnjV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFSgnjF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMinMaxV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFMinMaxF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; } let Latency = !if(!eq(mx, "M8"), 4, 3), ReleaseAtCycles = [!if(!eq(LMulLat, 1), 2, LMulLat)] in defm "" : LMULSEWWriteResMXSEW<"WriteVFCvtIToFV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; } } foreach mx = SchedMxList in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = !if(!eq(mx, "M8"), 4, 3), ReleaseAtCycles = [!if(!eq(LMulLat, 1), 2, LMulLat)] in defm "" : 
LMULWriteResMX<"WriteVFCvtFToIV", [SiFiveP600VectorArith], mx, IsWorstCase>; let Latency = 2, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVFCmpV", [SiFiveP600VectorMask], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVFCmpF", [SiFiveP600VectorMask], mx, IsWorstCase>; } let Latency = !mul(2, SiFiveP600RVVMultiplier.c), ReleaseAtCycles = [!if(!eq(LMulLat, 1), 2, LMulLat)] in { defm "" : LMULWriteResMX<"WriteVFClassV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVFMergeV", [SiFiveP600VectorArith], mx, IsWorstCase>; defm "" : LMULWriteResMX<"WriteVFMovV", [SiFiveP600VectorArith], mx, IsWorstCase>; } } // Widening foreach mx = SchedMxListW in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; let Latency = 3, ReleaseAtCycles = [LMulLat] in defm "" : LMULSEWWriteResMXSEW<"WriteVFWCvtIToFV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; } } foreach mx = SchedMxListFW in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 6, ReleaseAtCycles = [LMulLat] in defm "" : LMULWriteResMX<"WriteVFWCvtFToIV", [SiFiveP600VectorArith], mx, IsWorstCase>; } foreach mx = SchedMxListFW in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; let Latency = 6, ReleaseAtCycles = [LMulLat] in { defm "" : LMULSEWWriteResMXSEW<"WriteVFWALUV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWALUF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulF", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulAddV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulAddF", 
[SiFiveP600VectorArith], mx, sew, IsWorstCase>; defm "" : LMULSEWWriteResMXSEW<"WriteVFWCvtFToFV", [SiFiveP600VectorArith], mx, sew, IsWorstCase>; // Special case for variants with widen operands. let ReleaseAtCycles = [!mul(LMulLat, 2)] in def P600WriteVFWALUWidenOp_ # mx # _E # sew : SchedWriteRes<[SiFiveP600VectorArith]>; } defvar P600VFWALUBaseSchedRW = [!cast("P600WriteVFWALUWidenOp_" # mx # "_E" # sew), !cast("ReadVPassthru_" # mx # "_E" # sew), !cast("ReadVFWALUV_" # mx # "_E" # sew)]; def : InstRW("ReadVFWALUV_" # mx # "_E" # sew)]), (instregex "^PseudoVFW(ADD|SUB)_WV_" # mx # "_E" # sew # "$")>; def : InstRW<[P600VFWALUBaseSchedRW[0], P600VFWALUBaseSchedRW[2], !cast("ReadVFWALUV_" # mx # "_E" # sew)], (instregex "^PseudoVFW(ADD|SUB)_WV_" # mx # "_E" # sew # "_TIED$")>; def : InstRW("ReadVFWALUF_" # mx # "_E" # sew)]), (instregex "^PseudoVFW(ADD|SUB)_WFPR" # sew # "_" # mx # "_E" # sew # "$")>; def : InstRW("ReadVFWALUV_" # mx # "_E" # sew), !cast("ReadVMask")]), (instregex "^PseudoVFW(ADD|SUB)_WV_" # mx # "_E" # sew # "_MASK$")>; def : InstRW<[P600VFWALUBaseSchedRW[0], P600VFWALUBaseSchedRW[1], !cast("ReadVFWALUV_" # mx # "_E" # sew), !cast("ReadVMask")], (instregex "^PseudoVFW(ADD|SUB)_WV_" # mx # "_E" # sew # "_MASK_TIED$")>; def : InstRW("ReadVFWALUF_" # mx # "_E" # sew), !cast("ReadVMask")]), (instregex "^PseudoVFW(ADD|SUB)_WFPR" # sew # "_" # mx # "_E" # sew # "_MASK$")>; } } // Narrowing foreach mx = SchedMxListW in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMX.c; let Latency = 3, ReleaseAtCycles = [LMulLat] in { defm "" : LMULWriteResMX<"WriteVFNCvtFToIV", [SiFiveP600VectorArith], mx, IsWorstCase>; } } foreach mx = SchedMxListFW in { foreach sew = SchedSEWSet.val in { defvar LMulLat = SiFiveP600GetLMulCycles.c; defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW.c; let Latency = 3, ReleaseAtCycles = [!if(!eq(LMulLat, 1), 2, LMulLat)] in { defm "" : LMULSEWWriteResMXSEW<"WriteVFNCvtIToFV", 
[SiFiveP600VectorArith], mx, sew, IsWorstCase>;
      defm "" : LMULSEWWriteResMXSEW<"WriteVFNCvtFToFV", [SiFiveP600VectorArith],
                                     mx, sew, IsWorstCase>;
    }
  }
}

// NOTE(review): the angle-bracket template arguments in this region had been
// stripped by a lossy copy; they have been restored from the enclosing
// foreach loop context and the helper-class signatures above. Verify against
// upstream RISCVSchedSiFiveP600.td.

// Worst case needs around 29/25/37 * LMUL cycles for f16/32/64.
foreach mx = SchedMxListF in {
  foreach sew = SchedSEWSet<mx, /*isF=*/1>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxListF, /*isF=*/1>.c;
    defvar DivMicroOpLat =
        !cond(!eq(sew, 16): 29, !eq(sew, 32): 25, /* SEW=64 */ true: 37);
    defvar DivLatency = !mul(DivMicroOpLat, LMulLat);
    // Divide/sqrt is unpipelined: it occupies the FP divider for the whole
    // latency, while the issue queue is only held for LMulLat cycles.
    let Latency = DivLatency, ReleaseAtCycles = [LMulLat, DivLatency] in {
      defm "" : LMULSEWWriteResMXSEW<"WriteVFDivV",
                                     [SiFiveP600VEXQ1, SiFiveP600VFloatDiv],
                                     mx, sew, IsWorstCase>;
      defm "" : LMULSEWWriteResMXSEW<"WriteVFDivF",
                                     [SiFiveP600VEXQ1, SiFiveP600VFloatDiv],
                                     mx, sew, IsWorstCase>;
      defm "" : LMULSEWWriteResMXSEW<"WriteVFSqrtV",
                                     [SiFiveP600VEXQ1, SiFiveP600VFloatDiv],
                                     mx, sew, IsWorstCase>;
    }
  }
}

// 14. Vector Reduction Operations
foreach mx = SchedMxList in {
  foreach sew = SchedSEWSet<mx>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
    let ReleaseAtCycles = [LMulLat] in {
      let Latency = SiFiveP600VIReductionLatency<mx>.c in
      defm "" : LMULSEWWriteResMXSEW<"WriteVIRedV_From", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
      let Latency = SiFiveP600VIMinMaxReductionLatency<mx, sew>.c,
          ReleaseAtCycles = [SiFiveP600VIMinMaxReductionCycles<mx, sew>.c] in
      defm "" : LMULSEWWriteResMXSEW<"WriteVIRedMinMaxV_From", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
    }
  }
}
foreach mx = SchedMxListWRed in {
  foreach sew = SchedSEWSet<mx, /*isF=*/0, /*isWidening=*/1>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxListWRed>.c;
    let Latency = SiFiveP600VIReductionLatency<mx>.c,
        ReleaseAtCycles = [LMulLat] in {
      defm "" : LMULSEWWriteResMXSEW<"WriteVIWRedV_From", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
    }
  }
}
foreach mx = SchedMxListF in {
  foreach sew = SchedSEWSet<mx, /*isF=*/1>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxListF, /*isF=*/1>.c;
    let Latency = SiFiveP600VFMinMaxReduction<mx, sew>.latency,
        ReleaseAtCycles = [SiFiveP600VFMinMaxReduction<mx, sew>.cycles] in
    defm "" : LMULSEWWriteResMXSEW<"WriteVFRedMinMaxV_From", [SiFiveP600VEXQ1],
                                   mx, sew, IsWorstCase>;
    let Latency = SiFiveP600VFUnorderedReduction<mx, sew>.latency,
        ReleaseAtCycles = [SiFiveP600VFUnorderedReduction<mx, sew>.cycles] in
    defm "" : LMULSEWWriteResMXSEW<"WriteVFRedV_From", [SiFiveP600VEXQ1],
                                   mx, sew, IsWorstCase>;
    // Ordered reductions are serialized, so occupancy equals latency.
    let Latency = SiFiveP600VFOrderedReduction<mx, sew>.c,
        ReleaseAtCycles = [SiFiveP600VFOrderedReduction<mx, sew>.c] in
    defm "" : LMULSEWWriteResMXSEW<"WriteVFRedOV_From", [SiFiveP600VEXQ1],
                                   mx, sew, IsWorstCase>;
  }
}
foreach mx = SchedMxListFWRed in {
  foreach sew = SchedSEWSet<mx, /*isF=*/1, /*isWidening=*/1>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxListFWRed, /*isF=*/1>.c;
    let Latency = SiFiveP600VFWidenUnorderedReduction<mx>.latency,
        ReleaseAtCycles = [6] in
    defm "" : LMULSEWWriteResMXSEW<"WriteVFWRedV_From", [SiFiveP600VEXQ1],
                                   mx, sew, IsWorstCase>;
    let Latency = SiFiveP600VFOrderedReduction<mx, sew>.c,
        ReleaseAtCycles = [SiFiveP600VFOrderedReduction<mx, sew>.c] in
    defm "" : LMULSEWWriteResMXSEW<"WriteVFWRedOV_From", [SiFiveP600VEXQ1],
                                   mx, sew, IsWorstCase>;
  }
}

// 15. Vector Mask Instructions
foreach mx = SchedMxList in {
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = 2 in {
    defm "" : LMULWriteResMX<"WriteVMALUV", [SiFiveP600VectorMask], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVMSFSV", [SiFiveP600VectorMask], mx, IsWorstCase>;
    let ReleaseAtCycles = [2] in {
      defm "" : LMULWriteResMX<"WriteVMPopV", [SiFiveP600VectorMask], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVMFFSV", [SiFiveP600VectorMask], mx, IsWorstCase>;
    }
  }
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  let ReleaseAtCycles = [LMulLat] in {
    let Latency = 2 in
    defm "" : LMULWriteResMX<"WriteVIotaV", [SiFiveP600VectorMask], mx, IsWorstCase>;
    // vid.v isn't executed by the mask unit.
    let Latency = !if(!eq(mx, "M8"), 4, !if(!eq(mx, "M4"), 2, 1)) in
    defm "" : LMULWriteResMX<"WriteVIdxV", [SiFiveP600VectorArith], mx, IsWorstCase>;
  }
}

// 16. Vector Permutation Instructions
// Simple Slide
foreach mx = SchedMxList in {
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let ReleaseAtCycles = [LMulLat] in {
    let Latency = SiFiveP600VSlideI<mx>.c in
    defm "" : LMULWriteResMX<"WriteVSlideI", [SiFiveP600VEXQ0], mx, IsWorstCase>;
    let Latency = SiFiveP600VSlide1<mx>.c in {
      defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFiveP600VEXQ0], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVFSlide1F", [SiFiveP600VEXQ0], mx, IsWorstCase>;
    }
  }
}
foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = 2, ReleaseAtCycles = [1] in {
    defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
  }
}
// Complex Slide
foreach mx = ["M8", "M4", "M2"] in {
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = SiFiveP600VSlideXComplex<mx>.latency in {
    let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx>.cycles] in
    defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
    let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx>.cycles] in
    defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
  }
}
// NOTE(review): the four WriteRes targets below were stripped; restored as the
// scalar<->vector move writes modeled on the sibling P400 model -- confirm.
let Latency = 2, ReleaseAtCycles = [2] in {
  def : WriteRes<WriteVMovXS, [SiFiveP600VEXQ1]>;
  def : WriteRes<WriteVMovSX, [SiFiveP600VEXQ1]>;
  def : WriteRes<WriteVMovFS, [SiFiveP600VEXQ1]>;
  def : WriteRes<WriteVMovSF, [SiFiveP600VEXQ1]>;
}
// Simple Gather and Compress
foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = 3, ReleaseAtCycles = [1] in {
    defm "" : LMULWriteResMX<"WriteVRGatherVX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
  }
}
foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
  foreach sew = SchedSEWSet<mx>.val in {
    defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
    let Latency = 3, ReleaseAtCycles = [1] in {
      defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
      defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
      defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
    }
  }
}
// Complex Gather and Compress
foreach mx = ["M2", "M4", "M8"] in {
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = 6, ReleaseAtCycles = [SiFiveP600VPermutationComplex<mx>.c] in {
    defm "" : LMULWriteResMX<"WriteVRGatherVX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
  }
}
foreach mx = ["M2", "M4", "M8"] in {
  foreach sew = SchedSEWSet<mx>.val in {
    defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
    defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
    let Latency = 6 in {
      let ReleaseAtCycles = [SiFiveP600VPermutationComplex<mx>.c] in {
        defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFiveP600VEXQ1],
                                       mx, sew, IsWorstCase>;
        defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFiveP600VEXQ1],
                                       mx, sew, IsWorstCase>;
      }
      let ReleaseAtCycles = [!add(SiFiveP600VPermutationComplex<mx>.c, 1)] in
      defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFiveP600VEXQ1],
                                     mx, sew, IsWorstCase>;
    }
  }
}
// Simple Vrgather.vi
foreach mx = SchedMxList in {
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  let Latency = 3, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVRGatherVI", [SiFiveP600VEXQ1], mx, IsWorstCase>;
  }
}

// Vector Crypto
foreach mx = SchedMxList in {
  defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
  defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
  // Zvbb
  let ReleaseAtCycles = [LMulLat] in {
    let Latency = 2 in {
      // FIXME: Exegesis was not able to measure the latency of these
      // instructions. We probably should update them at some point.
      defm "" : LMULWriteResMX<"WriteVCPOPV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVWSLLV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVWSLLX", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVWSLLI", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    }
    let Latency = SiFiveP600VCryptoLatency<mx>.c in {
      defm "" : LMULWriteResMX<"WriteVBREVV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVCLZV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVCTZV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      def P600WriteVANDN_ # mx : SchedWriteRes<[SiFiveP600VectorCrypto]>;
    }
  }

  // Special case for VANDN -- we execute it on vector crypto unit.
  defvar P600VANDNBaseSchedRW = [!cast<SchedWrite>("P600WriteVANDN_" # mx),
                                 // VANDN always merge read operand.
                                 !cast<SchedRead>("ReadVPassthru_" # mx),
                                 !cast<SchedRead>("ReadVIALUV_" # mx),
                                 !cast<SchedRead>("ReadVIALUV_" # mx)];
  def : InstRW<P600VANDNBaseSchedRW,
               (instregex "^PseudoVANDN_V(V|X)_" # mx # "$")>;
  def : InstRW<!listconcat(P600VANDNBaseSchedRW,
                           [!cast<SchedRead>("ReadVMask")]),
               (instregex "^PseudoVANDN_V(V|X)_" # mx # "_MASK$")>;

  // Zvbc
  let Latency = SiFiveP600VCryptoLatency<mx>.c, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVCLMULV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVCLMULX", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  }
  // Zvkb
  // VANDN uses WriteVIALU[V|X|I]
  let Latency = SiFiveP600VCryptoLatency<mx>.c, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVBREV8V", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVREV8V", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVRotV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVRotX", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVRotI", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  }
  // Zvkg
  let Latency = SiFiveP600VCryptoLatency<mx>.c, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVGHSHV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVGMULV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  }
  // ZvknhaOrZvknhb
  // FIXME: The latency is probably wrong.
  let Latency = 3, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVSHA2CHV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVSHA2CLV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defvar ZvknhSEWs = !listremove(SchedSEWSet<mx>.val, [8, 16]);
    // Largest SEW is the last element, assuming SchedSEWSet is sorted in
    // ascending order.
    defvar LargestZvknhSEW = !foldl(!head(ZvknhSEWs), ZvknhSEWs, last, curr, curr);
    foreach sew = ZvknhSEWs in {
      // The worst case for Zvknh[ab] is designated to the largest SEW and LMUL.
      defvar IsWorstCaseVSHA2MSV = !and(IsWorstCase, !eq(sew, LargestZvknhSEW));
      let ReleaseAtCycles = [SiFiveP600VSHA2MSCycles<mx, sew>.c] in
      defm "" : LMULSEWWriteResMXSEW<"WriteVSHA2MSV", [SiFiveP600VectorCrypto],
                                     mx, sew, IsWorstCaseVSHA2MSV>;
    }
  }
  // Zvkned
  let Latency = 2 in {
    let ReleaseAtCycles = [LMulLat] in {
      defm "" : LMULWriteResMX<"WriteVAESMVV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVAESKF1V", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
      defm "" : LMULWriteResMX<"WriteVAESKF2V", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    }
    let ReleaseAtCycles = [!if(!lt(LMulLat, 2), LMulLat, !div(LMulLat, 2))] in
    defm "" : LMULWriteResMX<"WriteVAESZV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  }
  // Zvksed
  let Latency = 3, ReleaseAtCycles = [SiFiveP600VSM3CCycles<mx>.c] in
  defm "" : LMULWriteResMX<"WriteVSM3CV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  let Latency = 6, ReleaseAtCycles = [LMulLat] in
  defm "" : LMULWriteResMX<"WriteVSM3MEV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  let Latency = 3, ReleaseAtCycles = [LMulLat] in {
    defm "" : LMULWriteResMX<"WriteVSM4KV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
    defm "" : LMULWriteResMX<"WriteVSM4RV", [SiFiveP600VectorCrypto], mx, IsWorstCase>;
  }
}

// Others
// NOTE(review): the WriteRes targets here were stripped; restored following
// the sibling P400 model (CSR/Nop/RdVLENB) -- confirm resource names.
def : WriteRes<WriteCSR, [SiFiveP600SYS]>;
def : WriteRes<WriteNop, []>;
def :
WriteRes<WriteRdVLENB, [SiFiveP600SYS]>;

// FIXME: This could be better modeled by looking at the regclasses of the operands.
def : InstRW<[WriteIALU, ReadIALU], (instrs COPY)>;

//===----------------------------------------------------------------------===//
// Bypass and advance
// NOTE(review): the ReadAdvance operand lists in this section were stripped by
// a lossy copy; they were reconstructed from the standard RISC-V scheduling
// read set used by the sibling SiFive P400/P600 models. Verify against
// upstream RISCVSchedSiFiveP600.td before relying on exact membership/order.
def : ReadAdvance<ReadJmp, 0>;
def : ReadAdvance<ReadJalr, 0>;
def : ReadAdvance<ReadCSR, 0>;
def : ReadAdvance<ReadStoreData, 0>;
def : ReadAdvance<ReadMemBase, 0>;
def : ReadAdvance<ReadIALU, 0>;
def : ReadAdvance<ReadIALU32, 0>;
def : ReadAdvance<ReadShiftImm, 0>;
def : ReadAdvance<ReadShiftImm32, 0>;
def : ReadAdvance<ReadShiftReg, 0>;
def : ReadAdvance<ReadShiftReg32, 0>;
def : ReadAdvance<ReadIDiv, 0>;
def : ReadAdvance<ReadIDiv32, 0>;
def : ReadAdvance<ReadIRem, 0>;
def : ReadAdvance<ReadIRem32, 0>;
def : ReadAdvance<ReadIMul, 0>;
def : ReadAdvance<ReadIMul32, 0>;
def : ReadAdvance<ReadAtomicWA, 0>;
def : ReadAdvance<ReadAtomicWD, 0>;
def : ReadAdvance<ReadAtomicDA, 0>;
def : ReadAdvance<ReadAtomicDD, 0>;
def : ReadAdvance<ReadAtomicLDW, 0>;
def : ReadAdvance<ReadAtomicLDD, 0>;
def : ReadAdvance<ReadAtomicSTW, 0>;
def : ReadAdvance<ReadAtomicSTD, 0>;
def : ReadAdvance<ReadFStoreData, 0>;
def : ReadAdvance<ReadFMemBase, 0>;
def : ReadAdvance<ReadFAdd16, 0>;
def : ReadAdvance<ReadFAdd32, 0>;
def : ReadAdvance<ReadFAdd64, 0>;
def : ReadAdvance<ReadFMul16, 0>;
def : ReadAdvance<ReadFMul32, 0>;
def : ReadAdvance<ReadFMul64, 0>;
def : ReadAdvance<ReadFMA16, 0>;
def : ReadAdvance<ReadFMA16Addend, 0>;
def : ReadAdvance<ReadFMA32, 0>;
def : ReadAdvance<ReadFMA32Addend, 0>;
def : ReadAdvance<ReadFMA64, 0>;
def : ReadAdvance<ReadFMA64Addend, 0>;
def : ReadAdvance<ReadFDiv16, 0>;
def : ReadAdvance<ReadFDiv32, 0>;
def : ReadAdvance<ReadFDiv64, 0>;
def : ReadAdvance<ReadFSqrt16, 0>;
def : ReadAdvance<ReadFSqrt32, 0>;
def : ReadAdvance<ReadFSqrt64, 0>;
def : ReadAdvance<ReadFCmp16, 0>;
def : ReadAdvance<ReadFCmp32, 0>;
def : ReadAdvance<ReadFCmp64, 0>;
def : ReadAdvance<ReadFSgnj16, 0>;
def : ReadAdvance<ReadFSgnj32, 0>;
def : ReadAdvance<ReadFSgnj64, 0>;
def : ReadAdvance<ReadFMinMax16, 0>;
def : ReadAdvance<ReadFMinMax32, 0>;
def : ReadAdvance<ReadFMinMax64, 0>;
def : ReadAdvance<ReadFCvtF16ToI32, 0>;
def : ReadAdvance<ReadFCvtF16ToI64, 0>;
def : ReadAdvance<ReadFCvtF32ToI32, 0>;
def : ReadAdvance<ReadFCvtF32ToI64, 0>;
def : ReadAdvance<ReadFCvtF64ToI32, 0>;
def : ReadAdvance<ReadFCvtF64ToI64, 0>;
def : ReadAdvance<ReadFCvtI32ToF16, 0>;
def : ReadAdvance<ReadFCvtI32ToF32, 0>;
def : ReadAdvance<ReadFCvtI32ToF64, 0>;
def : ReadAdvance<ReadFCvtI64ToF16, 0>;
def : ReadAdvance<ReadFCvtI64ToF32, 0>;
def : ReadAdvance<ReadFCvtI64ToF64, 0>;
def : ReadAdvance<ReadFCvtF32ToF64, 0>;
def : ReadAdvance<ReadFCvtF64ToF32, 0>;
def : ReadAdvance<ReadFCvtF16ToF32, 0>;
def : ReadAdvance<ReadFCvtF32ToF16, 0>;
def : ReadAdvance<ReadFCvtF16ToF64, 0>;
def : ReadAdvance<ReadFCvtF64ToF16, 0>;
def : ReadAdvance<ReadFMovF16ToI16, 0>;
def : ReadAdvance<ReadFMovI16ToF16, 0>;
def : ReadAdvance<ReadFMovF32ToI32, 0>;
def : ReadAdvance<ReadFMovI32ToF32, 0>;
def : ReadAdvance<ReadFMovF64ToI64, 0>;
def : ReadAdvance<ReadFMovI64ToF64, 0>;
def : ReadAdvance<ReadFClass16, 0>;
def : ReadAdvance<ReadFClass32, 0>;
def : ReadAdvance<ReadFClass64, 0>;

// Bitmanip
def : ReadAdvance<ReadRotateImm, 0>;
def : ReadAdvance<ReadRotateImm32, 0>;
def : ReadAdvance<ReadRotateReg, 0>;
def : ReadAdvance<ReadRotateReg32, 0>;
def : ReadAdvance<ReadCLZ, 0>;
def : ReadAdvance<ReadCLZ32, 0>;
def : ReadAdvance<ReadCTZ, 0>;
def : ReadAdvance<ReadCTZ32, 0>;
def : ReadAdvance<ReadCPOP, 0>;
def : ReadAdvance<ReadCPOP32, 0>;
def : ReadAdvance<ReadORCB, 0>;
def : ReadAdvance<ReadIMinMax, 0>;
def : ReadAdvance<ReadREV8, 0>;
def : ReadAdvance<ReadSHXADD, 0>;
def : ReadAdvance<ReadSHXADD32, 0>;
def : ReadAdvance<ReadSingleBit, 0>;
def : ReadAdvance<ReadSingleBitImm, 0>;

// 6. Configuration-Setting Instructions
def : ReadAdvance<ReadVSETVLI, 0>;
def : ReadAdvance<ReadVSETVL, 0>;

// 7. Vector Loads and Stores
def : ReadAdvance<ReadVLDX, 0>;
def : ReadAdvance<ReadVSTX, 0>;
defm "" : LMULReadAdvance<"ReadVSTEV", 0>;
defm "" : LMULReadAdvance<"ReadVSTM", 0>;
def : ReadAdvance<ReadVLDSX, 0>;
def : ReadAdvance<ReadVSTSX, 0>;
defm "" : LMULReadAdvance<"ReadVSTS8V", 0>;
defm "" : LMULReadAdvance<"ReadVSTS16V", 0>;
defm "" : LMULReadAdvance<"ReadVSTS32V", 0>;
defm "" : LMULReadAdvance<"ReadVSTS64V", 0>;
defm "" : LMULReadAdvance<"ReadVLDUXV", 0>;
defm "" : LMULReadAdvance<"ReadVLDOXV", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX8", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX16", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX32", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX64", 0>;
defm "" : LMULReadAdvance<"ReadVSTUXV", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX8V", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX16V", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX32V", 0>;
defm "" : LMULReadAdvance<"ReadVSTUX64V", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX8", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX16", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX32", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX64", 0>;
defm "" : LMULReadAdvance<"ReadVSTOXV", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX8V", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX16V", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX32V", 0>;
defm "" : LMULReadAdvance<"ReadVSTOX64V", 0>;
// LMUL Aware
def : ReadAdvance<ReadVST1R, 0>;
def : ReadAdvance<ReadVST2R, 0>;
def : ReadAdvance<ReadVST4R, 0>;
def : ReadAdvance<ReadVST8R, 0>;

// 12. Vector Integer Arithmetic Instructions
defm : LMULReadAdvance<"ReadVIALUV", 0>;
defm : LMULReadAdvance<"ReadVIALUX", 0>;
defm : LMULReadAdvanceW<"ReadVIWALUV", 0>;
defm : LMULReadAdvanceW<"ReadVIWALUX", 0>;
defm : LMULReadAdvance<"ReadVExtV", 0>;
defm : LMULReadAdvance<"ReadVICALUV", 0>;
defm : LMULReadAdvance<"ReadVICALUX", 0>;
defm : LMULReadAdvance<"ReadVShiftV", 0>;
defm : LMULReadAdvance<"ReadVShiftX", 0>;
defm : LMULReadAdvanceW<"ReadVNShiftV", 0>;
defm : LMULReadAdvanceW<"ReadVNShiftX", 0>;
defm : LMULReadAdvance<"ReadVICmpV", 0>;
defm : LMULReadAdvance<"ReadVICmpX", 0>;
defm : LMULReadAdvance<"ReadVIMinMaxV", 0>;
defm : LMULReadAdvance<"ReadVIMinMaxX", 0>;
defm : LMULReadAdvance<"ReadVIMulV", 0>;
defm : LMULReadAdvance<"ReadVIMulX", 0>;
defm : LMULSEWReadAdvance<"ReadVIDivV", 0>;
defm : LMULSEWReadAdvance<"ReadVIDivX", 0>;
defm : LMULReadAdvanceW<"ReadVIWMulV", 0>;
defm : LMULReadAdvanceW<"ReadVIWMulX", 0>;
defm : LMULReadAdvance<"ReadVIMulAddV", 0>;
defm : LMULReadAdvance<"ReadVIMulAddX", 0>;
defm : LMULReadAdvanceW<"ReadVIWMulAddV", 0>;
defm : LMULReadAdvanceW<"ReadVIWMulAddX", 0>;
defm : LMULReadAdvance<"ReadVIMergeV", 0>;
defm : LMULReadAdvance<"ReadVIMergeX", 0>;
defm : LMULReadAdvance<"ReadVIMovV", 0>;
defm : LMULReadAdvance<"ReadVIMovX", 0>;

// 13. Vector Fixed-Point Arithmetic Instructions
defm "" : LMULReadAdvance<"ReadVSALUV", 0>;
defm "" : LMULReadAdvance<"ReadVSALUX", 0>;
defm "" : LMULReadAdvance<"ReadVAALUV", 0>;
defm "" : LMULReadAdvance<"ReadVAALUX", 0>;
defm "" : LMULReadAdvance<"ReadVSMulV", 0>;
defm "" : LMULReadAdvance<"ReadVSMulX", 0>;
defm "" : LMULReadAdvance<"ReadVSShiftV", 0>;
defm "" : LMULReadAdvance<"ReadVSShiftX", 0>;
defm "" : LMULReadAdvanceW<"ReadVNClipV", 0>;
defm "" : LMULReadAdvanceW<"ReadVNClipX", 0>;

// 14. Vector Floating-Point Instructions
defm "" : LMULSEWReadAdvanceF<"ReadVFALUV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFALUF", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWALUV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWALUF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMulV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMulF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFDivV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFDivF", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMulAddV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMulAddF", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulAddV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulAddF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFSqrtV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFRecpV", 0>;
defm "" : LMULReadAdvance<"ReadVFCmpV", 0>;
defm "" : LMULReadAdvance<"ReadVFCmpF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMinMaxV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFMinMaxF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFSgnjV", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFSgnjF", 0>;
defm "" : LMULReadAdvance<"ReadVFClassV", 0>;
defm "" : LMULReadAdvance<"ReadVFMergeV", 0>;
defm "" : LMULReadAdvance<"ReadVFMergeF", 0>;
defm "" : LMULReadAdvance<"ReadVFMovF", 0>;
defm "" : LMULSEWReadAdvanceF<"ReadVFCvtIToFV", 0>;
defm "" : LMULReadAdvance<"ReadVFCvtFToIV", 0>;
defm "" : LMULSEWReadAdvanceW<"ReadVFWCvtIToFV", 0>;
defm "" : LMULReadAdvanceFW<"ReadVFWCvtFToIV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFWCvtFToFV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFNCvtIToFV", 0>;
defm "" : LMULReadAdvanceW<"ReadVFNCvtFToIV", 0>;
defm "" : LMULSEWReadAdvanceFW<"ReadVFNCvtFToFV", 0>;

// 15. Vector Reduction Operations
def : ReadAdvance<ReadVIRedV, 0>;
def : ReadAdvance<ReadVIRedV0, 0>;
def : ReadAdvance<ReadVIWRedV, 0>;
def : ReadAdvance<ReadVIWRedV0, 0>;
def : ReadAdvance<ReadVFRedV, 0>;
def : ReadAdvance<ReadVFRedV0, 0>;
def : ReadAdvance<ReadVFWRedV, 0>;
def : ReadAdvance<ReadVFWRedV0, 0>;
def : ReadAdvance<ReadVFRedOV, 0>;
def : ReadAdvance<ReadVFRedOV0, 0>;
def : ReadAdvance<ReadVFWRedOV, 0>;
def : ReadAdvance<ReadVFWRedOV0, 0>;

// 16. Vector Mask Instructions
defm "" : LMULReadAdvance<"ReadVMALUV", 0>;
defm "" : LMULReadAdvance<"ReadVMPopV", 0>;
defm "" : LMULReadAdvance<"ReadVMFFSV", 0>;
defm "" : LMULReadAdvance<"ReadVMSFSV", 0>;
defm "" : LMULReadAdvance<"ReadVIotaV", 0>;

// 17. Vector Permutation Instructions
def : ReadAdvance<ReadVMovXS, 0>;
def : ReadAdvance<ReadVMovSX_V, 0>;
def : ReadAdvance<ReadVMovSX_X, 0>;
def : ReadAdvance<ReadVMovFS, 0>;
def : ReadAdvance<ReadVMovSF_V, 0>;
def : ReadAdvance<ReadVMovSF_F, 0>;
defm "" : LMULReadAdvance<"ReadVISlideV", 0>;
defm "" : LMULReadAdvance<"ReadVISlideX", 0>;
defm "" : LMULReadAdvance<"ReadVFSlideV", 0>;
defm "" : LMULReadAdvance<"ReadVFSlideF", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVCompressV", 0>;
// LMUL Aware
def : ReadAdvance<ReadVMov1V, 0>;
def : ReadAdvance<ReadVMov2V, 0>;
def : ReadAdvance<ReadVMov4V, 0>;
def : ReadAdvance<ReadVMov8V, 0>;

// Others
def : ReadAdvance<ReadVMask, 0>;
def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
foreach mx = SchedMxList in {
  def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
  foreach sew = SchedSEWSet<mx>.val in
  def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx # "_E" # sew), 0>;
}

// Vector Crypto Extensions
// Zvbb
defm "" : LMULReadAdvance<"ReadVBREVV", 0>;
defm "" : LMULReadAdvance<"ReadVCLZV", 0>;
defm "" : LMULReadAdvance<"ReadVCPOPV", 0>;
defm "" : LMULReadAdvance<"ReadVCTZV", 0>;
defm "" : LMULReadAdvance<"ReadVWSLLV", 0>;
defm "" : LMULReadAdvance<"ReadVWSLLX", 0>;
// Zvbc
defm "" : LMULReadAdvance<"ReadVCLMULV", 0>;
defm "" : LMULReadAdvance<"ReadVCLMULX", 0>;
// Zvkb
// VANDN uses ReadVIALU[V|X|I]
defm "" : LMULReadAdvance<"ReadVBREV8V", 0>;
defm "" : LMULReadAdvance<"ReadVREV8V", 0>;
defm "" : LMULReadAdvance<"ReadVRotV", 0>;
defm "" : LMULReadAdvance<"ReadVRotX", 0>;
// Zvkg
defm "" : LMULReadAdvance<"ReadVGHSHV", 0>;
defm "" : LMULReadAdvance<"ReadVGMULV", 0>;
// Zvknha or Zvknhb
defm "" : LMULReadAdvance<"ReadVSHA2CHV", 0>;
defm "" : LMULReadAdvance<"ReadVSHA2CLV", 0>;
defm "" : LMULSEWReadAdvance<"ReadVSHA2MSV", 0>;
// Zvkned
defm "" : LMULReadAdvance<"ReadVAESMVV", 0>;
defm "" : LMULReadAdvance<"ReadVAESKF1V", 0>;
defm "" : LMULReadAdvance<"ReadVAESKF2V", 0>;
defm "" : LMULReadAdvance<"ReadVAESZV", 0>;
// Zvksed
defm "" : LMULReadAdvance<"ReadVSM4KV", 0>;
defm "" : LMULReadAdvance<"ReadVSM4RV", 0>;
// Zvksh (was mislabeled "Zbksh": VSM3C/VSM3ME are the Zvksh SM3 instructions)
defm "" : LMULReadAdvance<"ReadVSM3CV", 0>;
defm "" : LMULReadAdvance<"ReadVSM3MEV", 0>;

//===----------------------------------------------------------------------===//
// Unsupported extensions
defm : UnsupportedSchedQ;
defm : UnsupportedSchedZabha;
defm : UnsupportedSchedZbc;
defm : UnsupportedSchedZbkb;
defm : UnsupportedSchedZbkx;
defm : UnsupportedSchedSFB;
defm : UnsupportedSchedZfa;
defm : UnsupportedSchedXsf;
}