Composable Kernel source listing: include/ck_tile/ops/gemm_quant/pipeline/gemm_group_quant_utils.hpp
// Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
// SPDX-License-Identifier: MIT

#pragma once

namespace ck_tile {

template <typename Problem, typename DataType, index_t YPerTile, index_t XPerTile>
CK_TILE_HOST_DEVICE static constexpr auto GetABQGlobalVectorLoadSize()
{
    using I1                 = number<1>;
    constexpr index_t NWarps = Problem::BlockGemmShape::BlockWarps::at(I1{});

    constexpr index_t BlockSize = Problem::kBlockSize;

    // Data is replicated across warps along NWarps, so we divide BlockSize by NWarps
    constexpr index_t elements_per_thread = (YPerTile * XPerTile) / (BlockSize / NWarps);
    constexpr index_t PackedSize = ck_tile::numeric_traits<remove_cvref_t<DataType>>::PackedSize;

    // Vector load candidates in descending order of priority
    constexpr std::array<index_t, 5> candidates{
        PackedSize * 32 / sizeof(DataType),
        PackedSize * 16 / sizeof(DataType),
        PackedSize * 8 / sizeof(DataType),
        PackedSize * 4 / sizeof(DataType),
        PackedSize * 2 / sizeof(DataType),
    };

    for(const auto vec_size : candidates)
    {
        if(vec_size <= 0 || XPerTile % vec_size != 0 || elements_per_thread % vec_size != 0)
            continue;
        // The smallest candidate is never returned from the loop; the function falls
        // back to PackedSize instead.
        if(vec_size != candidates[4])
        {
            return vec_size;
        }
    }
    return PackedSize; // Absolute fallback
}
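// Worked example (illustrative, not taken from the source): for a 16-bit DataType with
// PackedSize == 1, BlockSize == 256, NWarps == 2, YPerTile == 128 and XPerTile == 32,
// each thread owns 128 * 32 / (256 / 2) == 32 elements and the candidate list is
// {16, 8, 4, 2, 1}. The first candidate, 16, divides both XPerTile and
// elements_per_thread and is not the smallest candidate, so a vector load size of 16
// is returned.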

// AQ holds group-quant scale data for A. Data is loaded from DRAM and partitioned across
// threads. After the mfma, scales are shuffled across threads in the warp and applied to
// the accumulator registers.
template <typename BlockGemmShape,
          typename WarpGemm,
          index_t BlockSize,
          index_t YPerTile,
          index_t XPerTile,
          index_t KPerBlockAQ,
          index_t VecSize,
          bool PreshuffleQuant>
struct /* struct name elided in the original listing */
{
    static_assert(XPerTile % VecSize == 0, "XPerTile must be a multiple of VecSize!");
    static constexpr index_t warp_size = get_warp_size();
    static constexpr index_t num_warps = BlockSize / get_warp_size();

    static constexpr index_t MWarps = BlockGemmShape::BlockWarps::at(number<0>{});
    static constexpr index_t NWarps = BlockGemmShape::BlockWarps::at(number<1>{});
    static constexpr index_t KWarps = BlockGemmShape::BlockWarps::at(number<2>{});

    static constexpr index_t MIterPerWarp = BlockGemmShape::kM / (MWarps * WarpGemm::kM);

    static_assert(num_warps == MWarps * NWarps * KWarps);

    // KWarps > 1 isn't supported
    static_assert(KWarps == 1);

    static constexpr CK_TILE_HOST_DEVICE auto make_2d_static_tile_distribution()
    {
        if constexpr(PreshuffleQuant)
        {
            // # of elements per thread
            static_assert(XPerTile >= warp_size && XPerTile % warp_size == 0);
            constexpr index_t X1 = warp_size;
            constexpr index_t X0 = XPerTile / warp_size;

            constexpr index_t Y1 = MWarps;
            constexpr index_t Y0 = YPerTile / Y1;

            return make_static_tile_distribution(
                tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                           sequence<0, 0>>{});
        }
        else
        {
            // # of elements per thread
            constexpr index_t X = XPerTile;

            constexpr index_t YR = 1;
            constexpr index_t Y0 = MIterPerWarp ? MIterPerWarp : 1;
            constexpr index_t Y1 = MWarps;
            constexpr index_t Y2 = WarpGemm::kM;
            static_assert(Y2 >= WarpGemm::kM,
                          "Scales for all rows must be available within the warp.");
            static_assert(Y0 * Y1 * Y2 == YPerTile, "Y0, Y1, Y2 must cover the blocktile along Y.");

            return make_static_tile_distribution(
                tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                           sequence<0, 0>>{});
        }
    }
    static constexpr CK_TILE_HOST_DEVICE auto make_2d_static_tile_distribution_transposed()
    {
        constexpr index_t Y0 = YPerTile;
        constexpr index_t X0 = 1;
        constexpr index_t X1 = MIterPerWarp ? MIterPerWarp : 1;
        constexpr index_t X2 = MWarps;
        constexpr index_t X3 = WarpGemm::kM;

        static_assert(X3 >= WarpGemm::kM, "Scales for all rows must be available within the warp.");
        static_assert(X0 * X1 * X2 * X3 == XPerTile,
                      "X0, X1, X2, X3 must cover the blocktile along X.");

        return make_static_tile_distribution(
            tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                       sequence<1, 0>>{});
    }
};

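// Worked example (illustrative, not taken from the source): with BlockGemmShape::kM == 128,
// MWarps == 2 and WarpGemm::kM == 16, MIterPerWarp == 128 / (2 * 16) == 4. For the
// non-preshuffled path with YPerTile == 128, the Y split is then Y0 == 4 iterations,
// Y1 == 2 warps and Y2 == 16 rows per warp gemm, and 4 * 2 * 16 == 128 satisfies the
// static_assert that (Y0, Y1, Y2) covers the block tile along Y.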
template <typename BlockGemmShape,
          typename WarpGemm,
          index_t BlockSize,
          index_t YPerTile,
          index_t XPerTile,
          index_t VecSize>
struct /* struct name elided in the original listing */
{
    // TODO: make pattern where below condition does not need to hold - GGemmMultiDSplitk!
    static_assert(XPerTile % VecSize == 0, "XPerTile must be a multiple of VecSize!");
    static constexpr index_t warp_size = get_warp_size();
    static constexpr index_t num_warps = BlockSize / get_warp_size();

    static constexpr index_t MWarps = BlockGemmShape::BlockWarps::at(number<0>{});
    static constexpr index_t NWarps = BlockGemmShape::BlockWarps::at(number<1>{});
    static constexpr index_t KWarps = BlockGemmShape::BlockWarps::at(number<2>{});

    static constexpr index_t MIterPerWarp = BlockGemmShape::kM / (MWarps * WarpGemm::kM);

    static_assert(num_warps == MWarps * NWarps * KWarps);

    // KWarps > 1 isn't supported
    static_assert(KWarps == 1);

    // # of elements per thread
    static constexpr index_t X  = XPerTile;
    static constexpr index_t XR = 2;

    // Number of iterations per warp; M iterations are indexed using (Y0, Y1)
    static constexpr index_t Y0 = MIterPerWarp;

    // # of warps in the Y dimension
    static constexpr index_t Y1 = MWarps;

    static constexpr index_t Y2 = WarpGemm::kM;

    static_assert(Y0 * Y1 * Y2 == YPerTile, "Y0, Y1, Y2 must cover the blocktile along Y.");

    static constexpr CK_TILE_HOST_DEVICE auto make_2d_static_tile_distribution()
    {
        return make_static_tile_distribution(
            tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                       sequence<0, 0>>{});
    }
};
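// Usage sketch (illustrative only; "AQDist" and "AQDataType" are placeholders, since the
// actual struct names are elided in this listing). A pipeline would typically materialize
// the scale tile in registers from the returned distribution, e.g. with ck_tile's
// make_static_distributed_tensor:
//
//   constexpr auto aq_dist = AQDist::make_2d_static_tile_distribution();
//   auto aq_tile           = make_static_distributed_tensor<AQDataType>(aq_dist);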

// TODO: might need to update
template <typename BlockGemmShape,
          typename WarpGemm,
          index_t BlockSize,
          index_t YPerTile,
          index_t XPerTile,
          index_t YPerQ,
          bool PreshuffleQuant = false>
struct /* struct name elided in the original listing */
{
    static constexpr index_t warp_size = get_warp_size();
    static constexpr index_t num_warps = BlockSize / get_warp_size();

    static constexpr index_t MWarps = BlockGemmShape::BlockWarps::at(number<0>{});
    static constexpr index_t NWarps = BlockGemmShape::BlockWarps::at(number<1>{});
    static constexpr index_t KWarps = BlockGemmShape::BlockWarps::at(number<2>{});

    static constexpr index_t NIterPerWarp = BlockGemmShape::kN / (NWarps * WarpGemm::kN);

    static_assert(num_warps == MWarps * NWarps * KWarps);
    static_assert(KWarps == 1);

    /// Creates a 2D tile distribution for BQ (B-matrix quantization scales).
    static constexpr CK_TILE_HOST_DEVICE auto make_2d_static_tile_distribution()
    {
        if constexpr(PreshuffleQuant)
        {
            constexpr index_t X1 = warp_size;
            constexpr index_t X0 = XPerTile / warp_size;
            constexpr index_t Y1 = NWarps;
            constexpr index_t Y0 = YPerTile / Y1;

            return make_static_tile_distribution(
                tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                           sequence<0, 0>>{});
        }
        else
        {
            if constexpr(YPerQ < WarpGemm::kN)
            {
                // Case 1: Fine-grained - multiple quantization scales within a single warp
                constexpr index_t X  = XPerTile;             // Full X dimension of the tile
                constexpr index_t XR = 1;                    // No X replication needed
                constexpr index_t Y0 = NIterPerWarp;         // Iterations per warp in the N dim
                constexpr index_t Y1 = NWarps;               // Number of warps in the N dim
                constexpr index_t Y2 = WarpGemm::kN / YPerQ; // Number of scales per warp
                constexpr index_t YR = YPerQ;                // Elements per quantization group

                static_assert(Y0 * Y1 * Y2 == YPerTile,
                              "Y0, Y1, Y2 must cover the blocktile along Y.");

                return make_static_tile_distribution(
                    tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                               sequence<0, 0>>{});
            }
            else if constexpr(YPerQ <= WarpGemm::kN * NWarps)
            {
                // Case 2: Medium-grained - one quantization scale per warp
                constexpr auto YR = YPerQ / WarpGemm::kN; // Scale replication factor
                constexpr auto Y1 = NWarps / YR;          // Warps per unique scale
                constexpr auto Y0 = YPerTile / Y1;        // Iterations to cover the Y dimension

                return make_static_tile_distribution(
                    tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                               sequence<0, 0>>{});
            }
            else // YPerQ > WarpGemm::kN * NWarps
            {
                // Case 3: Coarse-grained - the quantization group spans all warps;
                // all warps in the N dimension share the same quantization scale
                return make_static_tile_distribution(
                    tile_distribution_encoding</* ... encoding arguments elided in the listing ..., */
                                               sequence<0, 0>>{});
            }
        }
    }
};
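// Worked example (illustrative, not taken from the source): with WarpGemm::kN == 32 and
// NWarps == 2, YPerQ == 16 takes case 1 (Y2 == 32 / 16 == 2 scales per warp),
// YPerQ == 64 takes case 2 (YR == 64 / 32 == 2, Y1 == 2 / 2 == 1, so the two N warps
// share one scale), and YPerQ == 128 takes case 3, where every warp in the N dimension
// reads the same quantization scale.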

template <typename GroupSizes>
struct /* struct name elided in the original listing */
{
    static constexpr index_t kM = GroupSizes::at(number<0>{});
    static constexpr index_t kN = GroupSizes::at(number<1>{});
    static constexpr index_t kK = GroupSizes::at(number<2>{});

    [[nodiscard]] CK_TILE_HOST static const std::string GetName()
    {
        return concat('_', "quant_group_shape", concat('x', kM, kN, kK));
    }
};
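// Example (illustrative; assumes concat joins its arguments with the given separator
// character): for GroupSizes == sequence<1, 128, 128>, GetName() would return
// "quant_group_shape_1x128x128".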

} // namespace ck_tile