[mlir][affine] Introduce a new symbol rule: the result of a Pure operation whose operands are valid symbolic identifiers (#118478)
This introduces a new symbol rule: the result of a Pure operation whose operands are valid symbolic identifiers is itself a valid symbolic identifier.
parent eff6b64258
commit d28a4f1fc0
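For illustration, here is a minimal sketch (the function, map, and value names are hypothetical, not taken from this commit) of the kind of IR the new rule accepts: `%sum` is produced by `arith.addi`, a Pure operation whose operands are function arguments and therefore valid symbols, so `%sum` can now be used directly as a symbol operand of an affine bound.

```mlir
#ub = affine_map<()[s0] -> (s0)>

// %a, %b and %m are block arguments of the function, i.e. defined at the top
// level of the enclosing AffineScope, and thus valid symbols.
func.func @pure_op_as_symbol(%a: index, %b: index, %m: memref<?xf32>) {
  // arith.addi is Pure and both operands are valid symbols, so under the new
  // rule %sum is itself a valid symbolic identifier.
  %sum = arith.addi %a, %b : index
  affine.for %i = 0 to #ub()[%sum] {
    %v = affine.load %m[%i] : memref<?xf32>
  }
  return
}
```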
@ -69,9 +69,7 @@ immediately enclosed by the latter),
3. a value that dominates the `AffineScope` op enclosing the value's
   use,
4. the result of a constant operation,
5. the result of an
   [`affine.apply` operation](#affineapply-mliraffineapplyop) that recursively takes as
   arguments any valid symbolic identifiers, or
5. the result of a `Pure` operation whose operands are valid symbolic identifiers.
6. the result of a
   [`dim` operation](MemRef.md/#memrefdim-mlirmemrefdimop) on either a memref that
   is an argument to a `AffineScope` op or a memref where the corresponding
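To make the relationship between the previous rule 5 (`affine.apply`) and the new rule 5 concrete, here is an illustrative sketch (function and value names are hypothetical): both results below are valid symbols, but the second no longer needs to be routed through `affine.apply`.

```mlir
#add = affine_map<()[s0, s1] -> (s0 + s1)>

func.func @symbol_sources(%a: index, %b: index) -> (index, index) {
  // Old rule 5: a symbol derived through affine.apply over symbol operands.
  %via_apply = affine.apply #add()[%a, %b]
  // New rule 5: any Pure operation over valid symbols (affine.apply is just
  // one such operation) also yields a valid symbolic identifier.
  %via_pure = arith.muli %a, %b : index
  return %via_apply, %via_pure : index, index
}
```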
@ -410,7 +410,8 @@ bool mlir::affine::isValidSymbol(Value value) {
/// A value can be used as a symbol for `region` iff it meets one of the
/// following conditions:
/// *) It is a constant.
/// *) It is the result of an affine apply operation with symbol arguments.
/// *) It is a result of a `Pure` operation whose operands are valid symbolic
///    identifiers.
/// *) It is a result of the dim op on a memref whose corresponding size is
///    a valid symbol.
/// *) It is defined at the top level of 'region' or is its argument.
@ -443,9 +444,12 @@ bool mlir::affine::isValidSymbol(Value value, Region *region) {
  if (matchPattern(defOp, m_Constant(&operandCst)))
    return true;

  // Affine apply operation is ok if all of its operands are ok.
  if (auto applyOp = dyn_cast<AffineApplyOp>(defOp))
    return applyOp.isValidSymbol(region);
  // A `Pure` operation whose operands are valid symbolic identifiers.
  if (isPure(defOp) && llvm::all_of(defOp->getOperands(), [&](Value operand) {
        return affine::isValidSymbol(operand, region);
      })) {
    return true;
  }

  // Dim op results could be valid symbols at any level.
  if (auto dimOp = dyn_cast<ShapedDimOpInterface>(defOp))
@ -638,13 +638,13 @@ func.func @vecdim_reduction_complex_ub(%in: memref<256x512xf32>, %out: memref<25
  return
}

// CHECK: #[[$map3:.*]] = affine_map<([[d0:.*]], [[d1:.*]]) -> ([[d0]], [[d1]] * 2)>
// CHECK: #[[$map3_sub:.*]] = affine_map<([[d0:.*]], [[d1:.*]]) -> ([[d0]] - [[d1]])>
// CHECK: #[[$map3:.*]] = affine_map<(d0, d1) -> (d0, d1 * 2)>
// CHECK: #[[$map3_sub:.*]] = affine_map<(d0)[s0] -> (-d0 + s0)>
// CHECK-LABEL: @vecdim_reduction_complex_ub
// CHECK: %[[vzero:.*]] = arith.constant dense<0.000000e+00> : vector<128xf32>
// CHECK: %{{.*}} = affine.for %[[iv:.*]] = 0 to min #[[$map3]](%[[M:.*]], %[[N:.*]]) step 128 iter_args(%[[red_iter:.*]] = {{.*}}) -> (vector<128xf32>) {
// CHECK: %[[ub:.*]] = affine.min #[[$map3]](%[[M]], %[[N]])
// CHECK: %[[elems_left:.*]] = affine.apply #[[$map3_sub]](%[[ub]], %[[iv]])
// CHECK: %[[elems_left:.*]] = affine.apply #[[$map3_sub]](%[[iv]])[%[[ub]]]
// CHECK: %[[mask:.*]] = vector.create_mask %[[elems_left]] : vector<128xi1>
// CHECK: %[[ld:.*]] = vector.transfer_read %{{.*}} : memref<256x512xf32>, vector<128xf32>
// CHECK: %[[select:.*]] = arith.select %[[mask]], %[[ld]], %[[vzero]] : vector<128xi1>, vector<128xf32>
@ -20,36 +20,6 @@ func.func @affine_apply_resul_non_index(%arg0 : index) {
  return
}

// -----

#map = affine_map<(d0)[s0] -> (d0 + s0)>

func.func @affine_for_lower_bound_invalid_dim(%arg : index) {
  affine.for %n0 = 0 to 7 {
    %dim = arith.addi %arg, %arg : index

    // expected-error@+1 {{operand cannot be used as a dimension id}}
    affine.for %n1 = 0 to #map(%dim)[%arg] {
    }
  }
  return
}

// -----

#map = affine_map<(d0)[s0] -> (d0 + s0)>

func.func @affine_for_upper_bound_invalid_dim(%arg : index) {
  affine.for %n0 = 0 to 7 {
    %dim = arith.addi %arg, %arg : index

    // expected-error@+1 {{operand cannot be used as a dimension id}}
    affine.for %n1 = #map(%dim)[%arg] to 7 {
    }
  }
  return
}

// -----
func.func @affine_load_invalid_dim(%M : memref<10xi32>) {
  "unknown"() ({
@ -93,20 +63,6 @@ func.func @affine_for_upper_bound_invalid_sym() {

#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>

func.func @affine_if_invalid_dim(%arg : index) {
  affine.for %n0 = 0 to 7 {
    %dim = arith.addi %arg, %arg : index

    // expected-error@+1 {{operand cannot be used as a dimension id}}
    affine.if #set0(%dim)[%n0] {}
  }
  return
}

// -----

#set0 = affine_set<(i)[N] : (i >= 0, N - i >= 0)>

func.func @affine_if_invalid_sym() {
  affine.for %i0 = 0 to 7 {
    // expected-error@+1 {{operand cannot be used as a symbol}}
@ -324,3 +324,88 @@ module attributes {gpu.container_module} {
// CHECK: affine.for %[[VAL_4:.*]] = %[[VAL_3]] to %[[VAL_2]] step 32 {
// CHECK: }
// CHECK: gpu.return

// -----

#map = affine_map<()[s0] -> (s0 mod 32)>

// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 mod 32)>

// CHECK-LABEL: gpu.func @affine_thread_id

module {
  gpu.module @gpu {
    gpu.func @affine_thread_id(%arg0: memref<?x?xf32>) kernel {
      %c3 = arith.constant 3 : index
      %dim = memref.dim %arg0, %c3 : memref<?x?xf32>
      %c0 = arith.constant 0 : index
      affine.for %arg3 = %c0 to %dim step 32 {
        %thread_id_x = gpu.thread_id x
        %0 = affine.apply #map()[%thread_id_x]
        %c128 = arith.constant 128 : index
        affine.for %arg4 = %0 to %c128 step 8 {
          %c32 = arith.constant 32 : index
        }
      }
      gpu.return
    }
  }
}

// CHECK-SAME: (%[[VAL_0:.*]]: memref<?x?xf32>) kernel {
// CHECK: %[[VAL_1:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_2:.*]] = memref.dim %[[VAL_0]], %[[VAL_1]] : memref<?x?xf32>
// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK: affine.for %[[VAL_4:.*]] = %[[VAL_3]] to %[[VAL_2]] step 32 {
// CHECK: %[[VAL_5:.*]] = gpu.thread_id x
// CHECK: %[[VAL_6:.*]] = affine.apply #[[$ATTR_0]](){{\[}}%[[VAL_5]]]
// CHECK: %[[VAL_7:.*]] = arith.constant 128 : index
// CHECK: affine.for %{{.*}} = %[[VAL_6]] to %[[VAL_7]] step 8 {
// -----

#map = affine_map<(d0)[s0] -> (d0 + s0)>

// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>

// CHECK-LABEL: func @arith_add_valid_symbol_upper_bound

func.func @arith_add_valid_symbol_upper_bound(%arg : index) {
  affine.for %n0 = 0 to 7 {
    %dim = arith.addi %arg, %arg : index
    affine.for %n1 = 0 to #map(%dim)[%arg] {
    }
  }
  return
}

// CHECK-SAME: %[[VAL_0:.*]]: index) {
// CHECK: affine.for %[[VAL_1:.*]] = 0 to 7 {
// CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_0]] : index
// CHECK: affine.for %[[VAL_3:.*]] = 0 to #[[$ATTR_0]](%[[VAL_2]]){{\[}}%[[VAL_0]]] {
// CHECK: }
// CHECK: }

// -----

#map = affine_map<(d0)[s0] -> (d0 + s0)>

// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>

// CHECK-LABEL: func @arith_add_valid_symbol_lower_bound

func.func @arith_add_valid_symbol_lower_bound(%arg : index) {
  affine.for %n0 = 0 to 7 {
    %dim = arith.addi %arg, %arg : index
    affine.for %n1 = #map(%dim)[%arg] to 7 {
    }
  }
  return
}

// CHECK-SAME: %[[VAL_0:.*]]: index) {
// CHECK: affine.for %[[VAL_1:.*]] = 0 to 7 {
// CHECK: %[[VAL_2:.*]] = arith.addi %[[VAL_0]], %[[VAL_0]] : index
// CHECK: affine.for %[[VAL_3:.*]] = #[[$ATTR_0]](%[[VAL_2]]){{\[}}%[[VAL_0]]] to 7 {
// CHECK: }
// CHECK: }
@ -43,7 +43,7 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 128)>
// CHECK-DAG: #[[$MAP:.*]] = affine_map<()[s0] -> (s0 floordiv 128)>

// CHECK-LABEL: func.func @warpgroup_3d(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -61,7 +61,7 @@ func.func @warpgroup_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
// CHECK: gpu.launch
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[WG:.*]] = affine.apply #[[$MAP]](%[[TIDX]])
// CHECK-DAG: %[[WG:.*]] = affine.apply #[[$MAP]]()[%[[TIDX]]]
// CHECK-DAG: %[[CMPX:.*]] = arith.cmpi ult, %[[TIDX]], %[[C384]] : index
// CHECK-DAG: %[[CMPY:.*]] = arith.cmpi ult, %[[TIDY]], %[[C1]] : index
// CHECK: %[[COND:.*]] = arith.andi %[[CMPX]], %[[CMPY]] : i1
@ -95,7 +95,7 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 floordiv 16)>
// CHECK-DAG: #map = affine_map<()[s0] -> (s0 floordiv 16)>

// CHECK-LABEL: func.func @warp_3d(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -114,7 +114,7 @@ func.func @warp_3d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !g
// CHECK: gpu.launch
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[W:.*]] = affine.apply #[[$MAP]](%[[TIDX]])
// CHECK-DAG: %[[W:.*]] = affine.apply #[[$MAP]]()[%[[TIDX]]]
// CHECK-DAG: %[[CMPX:.*]] = arith.cmpi ult, %[[TIDX]], %[[C32]] : index
// CHECK-DAG: %[[CMPY:.*]] = arith.cmpi ult, %[[TIDY]], %[[C3]] : index
// CHECK: %[[COND:.*]] = arith.andi %[[CMPX]], %[[CMPY]] : i1
@ -354,9 +354,9 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAPWGLIN:.*]] = affine_map<(d0, d1, d2) -> (d0 + d1 * 32 + d2 * 256)>
// CHECK-DAG: #[[$MAPWGX:.*]] = affine_map<(d0, d1) -> (((d0 + d1 * 32) floordiv 128) mod 2)>
// CHECK-DAG: #[[$MAPWGY:.*]] = affine_map<(d0, d1, d2) -> (d2 + ((d0 + d1 * 32) floordiv 128) floordiv 2)>
// CHECK-DAG: #[[$MAPWGLIN:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 256)>
// CHECK-DAG: #[[$MAPWGX:.*]] = affine_map<()[s0, s1] -> (((s0 + s1 * 32) floordiv 128) mod 2)>
// CHECK-DAG: #[[$MAPWGY:.*]] = affine_map<()[s0, s1, s2] -> (s2 + ((s0 + s1 * 32) floordiv 128) floordiv 2)>

// CHECK-LABEL: func.func @warpgroup_linear(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -376,9 +376,9 @@ func.func @warpgroup_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %st
// CHECK-DAG: %[[TIDX:.*]] = gpu.thread_id x
// CHECK-DAG: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[TIDZ:.*]] = gpu.thread_id z
// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWGLIN]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWGX]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWGY]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWGLIN]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWGX]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWGY]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[WIDLIN]], %[[C768]] : index
// CHECK: scf.if %[[CMPLIN]]
// CHECK: memref.load %[[ARGX]][%[[WIDX]], %[[WIDY]]]
@ -410,9 +410,9 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAPWLIN:.*]] = affine_map<(d0, d1, d2) -> (d0 + d1 * 32 + d2 * 256)>
// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<(d0, d1, d2) -> ((d1 + d2 * 8 + d0 floordiv 32) mod 2)>
// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<(d0, d1, d2) -> ((d1 + d2 * 8 + d0 floordiv 32) floordiv 2)>
// CHECK-DAG: #[[$MAPWLIN:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 32 + s2 * 256)>
// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<()[s0, s1, s2] -> ((s1 + s2 * 8 + s0 floordiv 32) mod 2)>
// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<()[s0, s1, s2] -> ((s1 + s2 * 8 + s0 floordiv 32) floordiv 2)>

// CHECK-LABEL: func.func @warp_linear(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -432,9 +432,9 @@ func.func @warp_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
// CHECK-DAG: %[[TIDX:.*]] = gpu.thread_id x
// CHECK-DAG: %[[TIDY:.*]] = gpu.thread_id y
// CHECK-DAG: %[[TIDZ:.*]] = gpu.thread_id z
// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWLIN]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK-DAG: %[[WIDLIN:.*]] = affine.apply #[[$MAPWLIN]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[WIDLIN]], %[[C192]] : index
// CHECK: scf.if %[[CMPLIN]]
// CHECK: memref.load %[[ARGX]][%[[WIDX]], %[[WIDY]]]
@ -466,12 +466,12 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<(d0, d1) -> (((d0 + d1 * 18) floordiv 32) mod 3)>
// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<(d0, d1) -> ((((d0 + d1 * 18) floordiv 32) mod 6) floordiv 3)>
// CHECK-DAG: #[[$MAPWX:.*]] = affine_map<()[s0, s1] -> (((s0 + s1 * 18) floordiv 32) mod 3)>
// CHECK-DAG: #[[$MAPWY:.*]] = affine_map<()[s0, s1] -> ((((s0 + s1 * 18) floordiv 32) mod 6) floordiv 3)>

// CHECK-DAG: #[[$MAPLIN:.*]] = affine_map<(d0, d1) -> (d0 + d1 * 18)>
// CHECK-DAG: #[[$MAPLX:.*]] = affine_map<(d0, d1) -> ((d0 + d1 * 18) mod 10)>
// CHECK-DAG: #[[$MAPLY:.*]] = affine_map<(d0, d1) -> ((d0 + d1 * 18) floordiv 10)>
// CHECK-DAG: #[[$MAPLIN:.*]] = affine_map<()[s0, s1] -> (s0 + s1 * 18)>
// CHECK-DAG: #[[$MAPLX:.*]] = affine_map<()[s0, s1] -> ((s0 + s1 * 18) mod 10)>
// CHECK-DAG: #[[$MAPLY:.*]] = affine_map<()[s0, s1] -> ((s0 + s1 * 18) floordiv 10)>

// CHECK-LABEL: func.func @map_multi_level_linear(
func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !gpu.async.token) -> !type {
@ -504,9 +504,9 @@ func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f3
      memref.store %6, %y[%i, %j] : !type
    } { mapping = [#gpu.thread<y>, #gpu.thread<x>]}

// CHECK-DAG: %[[LIN:.*]] = affine.apply #[[$MAPLIN]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[LIN:.*]] = affine.apply #[[$MAPLIN]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[WIDX:.*]] = affine.apply #[[$MAPWX]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[WIDY:.*]] = affine.apply #[[$MAPWY]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[LIN]], %[[C192]] : index
// CHECK: scf.if %[[CMPLIN]]
    scf.forall (%i, %j, %k) in (%c3, %c2, %c1) {
@ -515,8 +515,8 @@ func.func @map_multi_level_linear(%x: !type, %y: !type, %t: !type1d, %alpha : f3
      memref.store %8, %y[%i, %j] : !type
    } {mapping = [#gpu.warp<linear_dim_0>, #gpu.warp<linear_dim_1>, #gpu.warp<linear_dim_2>] }

// CHECK-DAG: %[[LIDX:.*]] = affine.apply #[[$MAPLX]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[LIDY:.*]] = affine.apply #[[$MAPLY]](%[[TIDX]], %[[TIDY]])
// CHECK-DAG: %[[LIDX:.*]] = affine.apply #[[$MAPLX]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[LIDY:.*]] = affine.apply #[[$MAPLY]]()[%[[TIDX]], %[[TIDY]]]
// CHECK-DAG: %[[COND:.*]] = arith.cmpi ult, %[[LIN]], %[[C20]] : index
// CHECK: scf.if %[[COND]]
// CHECK: memref.load %{{.*}}[%[[LIDX]]] : memref<32xf32>
@ -545,9 +545,9 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAPBLIN:.*]] = affine_map<(d0, d1, d2) -> (d0 + d1 * 12 + d2 * 108)>
// CHECK-DAG: #[[$MAPBX:.*]] = affine_map<(d0, d1, d2) -> ((d0 + d1 * 12 + d2 * 108) mod 7)>
// CHECK-DAG: #[[$MAPBY:.*]] = affine_map<(d0, d1, d2) -> ((d0 + d1 * 12 + d2 * 108) floordiv 7)>
// CHECK-DAG: #[[$MAPBLIN:.*]] = affine_map<()[s0, s1, s2] -> (s0 + s1 * 12 + s2 * 108)>
// CHECK-DAG: #[[$MAPBX:.*]] = affine_map<()[s0, s1, s2] -> ((s0 + s1 * 12 + s2 * 108) mod 7)>
// CHECK-DAG: #[[$MAPBY:.*]] = affine_map<()[s0, s1, s2] -> ((s0 + s1 * 12 + s2 * 108) floordiv 7)>

// CHECK-LABEL: func.func @block_linear_existing_launch(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -566,9 +566,9 @@ func.func @block_linear_existing_launch(
// CHECK-DAG: %[[BIDX:.*]] = gpu.block_id x
// CHECK-DAG: %[[BIDY:.*]] = gpu.block_id y
// CHECK-DAG: %[[BIDZ:.*]] = gpu.block_id z
// CHECK-DAG: %[[BIDLIN:.*]] = affine.apply #[[$MAPBLIN]](%[[BIDX]], %[[BIDY]], %[[BIDZ]])
// CHECK-DAG: %[[BLX:.*]] = affine.apply #[[$MAPBX]](%[[BIDX]], %[[BIDY]], %[[BIDZ]])
// CHECK-DAG: %[[BLY:.*]] = affine.apply #[[$MAPBY]](%[[BIDX]], %[[BIDY]], %[[BIDZ]])
// CHECK-DAG: %[[BIDLIN:.*]] = affine.apply #[[$MAPBLIN]]()[%[[BIDX]], %[[BIDY]], %[[BIDZ]]]
// CHECK-DAG: %[[BLX:.*]] = affine.apply #[[$MAPBX]]()[%[[BIDX]], %[[BIDY]], %[[BIDZ]]]
// CHECK-DAG: %[[BLY:.*]] = affine.apply #[[$MAPBY]]()[%[[BIDX]], %[[BIDY]], %[[BIDZ]]]
// CHECK-DAG: %[[CMPLIN:.*]] = arith.cmpi ult, %[[BIDLIN]], %[[C63]] : index
// CHECK: scf.if %[[CMPLIN]]
// CHECK: memref.load %[[ARGX]][%[[BLX]], %[[BLY]]]
@ -600,8 +600,8 @@ module attributes {transform.with_named_sequence} {
!type = memref<2 x 32 x f32>
!type1d = memref<32 x f32>

// CHECK-DAG: #[[$MAPBX:.*]] = affine_map<(d0) -> (d0 mod 7)>
// CHECK-DAG: #[[$MAPBY:.*]] = affine_map<(d0, d1, d2) -> (d1 + d2 * 9 + d0 floordiv 7)>
// CHECK-DAG: #[[$MAPBX:.*]] = affine_map<()[s0] -> (s0 mod 7)>
// CHECK-DAG: #[[$MAPBY:.*]] = affine_map<()[s0, s1, s2] -> (s1 + s2 * 9 + s0 floordiv 7)>

// CHECK-LABEL: func.func @block_linear_generate_launch(
// CHECK-SAME: %[[ARGX:[0-9a-z]+]]: memref<2x32xf32>
@ -620,8 +620,8 @@ func.func @block_linear_generate_launch(
// CHECK-DAG: %[[BIDX:.*]] = gpu.block_id x
// CHECK-DAG: %[[BIDY:.*]] = gpu.block_id y
// CHECK-DAG: %[[BIDZ:.*]] = gpu.block_id z
// CHECK-DAG: %[[BLX:.*]] = affine.apply #[[$MAPBX]](%[[BIDX]])
// CHECK-DAG: %[[BLY:.*]] = affine.apply #[[$MAPBY]](%[[BIDX]], %[[BIDY]], %[[BIDZ]])
// CHECK-DAG: %[[BLX:.*]] = affine.apply #[[$MAPBX]]()[%[[BIDX]]]
// CHECK-DAG: %[[BLY:.*]] = affine.apply #[[$MAPBY]]()[%[[BIDX]], %[[BIDY]], %[[BIDZ]]]
// CHECK: memref.load %[[ARGX]][%[[BLX]], %[[BLY]]]
// CHECK: memref.load %[[ARGY]][%[[BLX]], %[[BLY]]]
  scf.forall (%i, %j) in (%c7, %c9) {
@ -647,8 +647,8 @@ module attributes {transform.with_named_sequence} {
#map = affine_map<(d0) -> (d0 * 128)>
#map1 = affine_map<(d0) -> (d0 * 32)>

// CHECK-DAG: #[[$MAPB:.*]] = affine_map<(d0) -> (d0 * 128)>
// CHECK-DAG: #[[$MAPW:.*]] = affine_map<(d0, d1, d2) -> (d2 * 32 + ((d0 + d1 * 4) floordiv 32) * 32)>
// CHECK-DAG: #[[$MAPB:.*]] = affine_map<()[s0] -> (s0 * 128)>
// CHECK-DAG: #[[$MAPW:.*]] = affine_map<()[s0, s1, s2] -> (s2 * 32 + ((s0 + s1 * 4) floordiv 32) * 32)>

// CHECK-LABEL: func.func @simple_fill(
func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
@ -660,14 +660,14 @@ func.func @simple_fill(%arg0: memref<128xf32>) -> memref<128xf32> {
// CHECK: gpu.launch
  scf.forall (%arg1) in (1) {
// CHECK: %[[BIDX:.*]] = gpu.block_id x
// CHECK: %[[BLX:.*]] = affine.apply #[[$MAPB]](%[[BIDX]])
// CHECK: %[[BLX:.*]] = affine.apply #[[$MAPB]]()[%[[BIDX]]]
    %0 = affine.apply #map(%arg1)
    %subview = memref.subview %arg0[%0] [128] [1] : memref<128xf32> to memref<128xf32, strided<[1], offset: ?>>
    scf.forall (%arg2) in (4) {
// CHECK: %[[TIDX:.*]] = gpu.thread_id x
// CHECK: %[[TIDY:.*]] = gpu.thread_id y
// CHECK: %[[TIDZ:.*]] = gpu.thread_id z
// CHECK: %[[THX:.*]] = affine.apply #[[$MAPW]](%[[TIDX]], %[[TIDY]], %[[TIDZ]])
// CHECK: %[[THX:.*]] = affine.apply #[[$MAPW]]()[%[[TIDX]], %[[TIDY]], %[[TIDZ]]]
// CHECK-NOT: scf.if
// CHECK: memref.subview %{{.*}}[%[[THX]]]
      %1 = affine.apply #map1(%arg2)
@ -40,9 +40,9 @@ module attributes {transform.with_named_sequence} {
// CHECK: %[[KINDEX:.+]] = linalg.index 2 : index

// Compute input channel/convolved indices.
// CHECK: %[[ICINDEX:.+]] = affine.apply affine_map<(d0) -> (d0 mod 4)>(%[[KINDEX]])
// CHECK: %[[CONVH:.+]] = affine.apply affine_map<(d0, d1) -> (d0 floordiv 14 + d1 floordiv 12)>(%[[MINDEX]], %[[KINDEX]])
// CHECK: %[[CONVW:.+]] = affine.apply affine_map<(d0, d1) -> (d0 mod 14 + (d1 mod 12) floordiv 4)>(%[[MINDEX]], %[[KINDEX]])
// CHECK: %[[ICINDEX:.+]] = affine.apply affine_map<()[s0] -> (s0 mod 4)>()[%[[KINDEX]]]
// CHECK: %[[CONVH:.+]] = affine.apply affine_map<()[s0, s1] -> (s0 floordiv 14 + s1 floordiv 12)>()[%[[MINDEX]], %[[KINDEX]]]
// CHECK: %[[CONVW:.+]] = affine.apply affine_map<()[s0, s1] -> (s0 mod 14 + (s1 mod 12) floordiv 4)>()[%[[MINDEX]], %[[KINDEX]]]

// Extract from the input tensor.
// CHECK: %[[EXTRACTED_INPUT:.+]] = tensor.extract
@ -227,9 +227,9 @@ module attributes {transform.with_named_sequence} {
// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>

// Im2col maps
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0) -> (d0 floordiv 9)>
// CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1) -> (d0 floordiv 14 + (d1 mod 9) floordiv 3)>
// CHECK-DAG: #[[MAP8:.+]] = affine_map<(d0, d1) -> (d0 + d1 - (d0 floordiv 14) * 14 - (d1 floordiv 3) * 3)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 floordiv 9)>
// CHECK-DAG: #[[MAP7:.+]] = affine_map<()[s0, s1] -> (s0 floordiv 14 + (s1 mod 9) floordiv 3)>
// CHECK-DAG: #[[MAP8:.+]] = affine_map<()[s0, s1] -> (s0 + s1 - (s0 floordiv 14) * 14 - (s1 floordiv 3) * 3)>


// CHECK-DAG: #[[LHSMAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
@ -251,9 +251,9 @@ module attributes {transform.with_named_sequence} {
// CHECK: %[[NINDEX:.+]] = linalg.index 2 : index

// Compute input channel/convolved indices.
// CHECK: %[[ICINDEX:.+]] = affine.apply #[[MAP1]](%[[KINDEX]])
// CHECK: %[[CONVH:.+]] = affine.apply #[[MAP7]](%[[NINDEX]], %[[KINDEX]])
// CHECK: %[[CONVW:.+]] = affine.apply #[[MAP8]](%[[NINDEX]], %[[KINDEX]])
// CHECK: %[[ICINDEX:.+]] = affine.apply #[[MAP1]]()[%[[KINDEX]]]
// CHECK: %[[CONVH:.+]] = affine.apply #[[MAP7]]()[%[[NINDEX]], %[[KINDEX]]]
// CHECK: %[[CONVW:.+]] = affine.apply #[[MAP8]]()[%[[NINDEX]], %[[KINDEX]]]

// Extract from the input tensor.
// CHECK: %[[EXTRACTED_INPUT:.+]] = tensor.extract
@ -300,9 +300,9 @@ module attributes {transform.with_named_sequence} {
// CHECK: %[[KINDEX:.+]] = linalg.index 2 : index

// Compute input channel/convolved indices.
// CHECK: %[[ICINDEX:.+]] = affine.apply affine_map<(d0) -> (d0 mod 4)>(%[[KINDEX]])
// CHECK: %[[CONVH:.+]] = affine.apply affine_map<(d0, d1) -> (d0 floordiv 14 + d1 floordiv 12)>(%[[MINDEX]], %[[KINDEX]])
// CHECK: %[[CONVW:.+]] = affine.apply affine_map<(d0, d1) -> (d0 mod 14 + (d1 mod 12) floordiv 4)>(%[[MINDEX]], %[[KINDEX]])
// CHECK: %[[ICINDEX:.+]] = affine.apply affine_map<()[s0] -> (s0 mod 4)>()[%[[KINDEX]]]
// CHECK: %[[CONVH:.+]] = affine.apply affine_map<()[s0, s1] -> (s0 floordiv 14 + s1 floordiv 12)>()[%[[MINDEX]], %[[KINDEX]]]
// CHECK: %[[CONVW:.+]] = affine.apply affine_map<()[s0, s1] -> (s0 mod 14 + (s1 mod 12) floordiv 4)>()[%[[MINDEX]], %[[KINDEX]]]

// Extract from the input tensor.
// CHECK: %[[EXTRACTED_INPUT:.+]] = tensor.extract
@ -19,13 +19,13 @@ module attributes {transform.with_named_sequence} {
  }
}

// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// TILE-10n25-LABEL: func @indexed_vector
// TILE-10n25: %[[C10:.*]] = arith.constant 10 : index
// TILE-10n25: scf.for %[[J:.*]] = {{.*}} step %[[C10]]
// TILE-10n25: linalg.generic
// TILE-10n25: %[[I:.*]] = linalg.index 0 : index
// TILE-10n25: %[[NEW_I:.*]] = affine.apply [[$MAP]](%[[I]], %[[J]])
// TILE-10n25: %[[NEW_I:.*]] = affine.apply [[$MAP]](%[[J]])[%[[I]]]
// TILE-10n25: linalg.yield %[[NEW_I]] : index

// -----
@ -51,7 +51,7 @@ module attributes {transform.with_named_sequence} {
  }
}

// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// TILE-10n25-LABEL: func @indexed_matrix
// TILE-10n25-DAG: %[[C25:.*]] = arith.constant 25 : index
// TILE-10n25-DAG: %[[C10:.*]] = arith.constant 10 : index
@ -59,8 +59,8 @@ module attributes {transform.with_named_sequence} {
// TILE-10n25: scf.for %[[L:.*]] = {{.*}} step %[[C25]]
// TILE-10n25: linalg.generic
// TILE-10n25: %[[I:.*]] = linalg.index 0 : index
// TILE-10n25: %[[NEW_I:.*]] = affine.apply [[$MAP]](%[[I]], %[[K]])
// TILE-10n25: %[[NEW_I:.*]] = affine.apply [[$MAP]](%[[K]])[%[[I]]]
// TILE-10n25: %[[J:.*]] = linalg.index 1 : index
// TILE-10n25: %[[NEW_J:.*]] = affine.apply [[$MAP]](%[[J]], %[[L]])
// TILE-10n25: %[[NEW_J:.*]] = affine.apply [[$MAP]](%[[L]])[%[[J]]]
// TILE-10n25: %[[SUM:.*]] = arith.addi %[[NEW_I]], %[[NEW_J]] : index
// TILE-10n25: linalg.yield %[[SUM]] : index
@ -10,7 +10,7 @@ module attributes {transform.with_named_sequence} {

func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32

// CHECK: #[[$ADD_42_MAP:.+]] = affine_map<(d0) -> (d0 + 42)>
// CHECK: #[[$ADD_42_MAP:.+]] = affine_map<()[s0] -> (s0 + 42)>

// CHECK-LABEL: @one_d_static
// CHECK-SAME: %[[IN:.+]]: tensor<100xf32>, %[[OUT:.+]]: tensor<100xf32>
@ -30,7 +30,7 @@ func.func @one_d_static(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tenso
// CHECK: ins(%[[IN_SLICE_HIGH]]
// CHECK: outs(%[[OUT_SLICE_HIGH]]
// CHECK: %[[IDX:.+]] = linalg.index 0
// CHECK: affine.apply #[[$ADD_42_MAP]](%[[IDX]])
// CHECK: affine.apply #[[$ADD_42_MAP]]()[%[[IDX]]]
// CHECK: func.call @elem
// CHECK: %[[RES:.+]] = tensor.insert_slice %[[RES_SLICE_HIGH]] into %[[RES_PARTIAL]][42] [58] [1]
  %0 = linalg.generic {
@ -259,14 +259,14 @@ module attributes {transform.with_named_sequence} {
    transform.yield
  }
}
// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0, d1) -> (d0 + d1)>
// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-LABEL: @indexed_semantics
// CHECK: scf.for %[[I0:.+]] = %{{.*}} to %{{.*}} step %{{.*}}
// CHECK: scf.for %[[I1:.+]] = %{{.*}} to %{{.*}} step %{{.*}}
// CHECK: %[[INDEX0:.+]] = linalg.index 0
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX0]], %[[I0]])
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I0]])[%[[INDEX0]]]
// CHECK: %[[INDEX1:.+]] = linalg.index 1
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX1]], %[[I1]])
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I1]])[%[[INDEX1]]]
// CHECK: arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]

// -----
@ -205,7 +205,7 @@ module attributes {transform.with_named_sequence} {

// -----

// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0, d1) -> (d0 + d1)>
// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>

func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Check that we correctly amend "linalg.index" results.
@ -241,9 +241,9 @@ module attributes {transform.with_named_sequence} {
// CHECK-LABEL: @indexed_semantics
// CHECK: scf.forall (%[[I0:.+]], %[[I1:.+]]) =
// CHECK: %[[INDEX0:.+]] = linalg.index 0
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX0]], %[[I0]])
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I0]])[%[[INDEX0]]]
// CHECK: %[[INDEX1:.+]] = linalg.index 1
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX1]], %[[I1]])
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I1]])[%[[INDEX1]]]
// CHECK: arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]

// -----