blockwise_gemm_smfmac_xdlops.hpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/common_header.hpp"
#include "ck/tensor_description/tensor_adaptor.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer.hpp"
#include "ck/tensor_operation/gpu/warp/smfmac_xdlops_gemm.hpp"

namespace ck {

template <index_t MNXdlPerWave, index_t MNWaves, index_t MNPerXdl, typename TileDesc_K0_MN_K1>
__host__ __device__ static constexpr auto
MakeGemmMmaTileDescriptor_MN0_MN1_MN2_K(const TileDesc_K0_MN_K1&)
{
    constexpr index_t K0 = TileDesc_K0_MN_K1{}.GetLength(Number<0>{});
    constexpr index_t K1 = TileDesc_K0_MN_K1{}.GetLength(Number<2>{});

    return transform_tensor_descriptor(
        TileDesc_K0_MN_K1{},
        make_tuple(make_merge_transform_v3_division_mod(make_tuple(Number<K0>{}, Number<K1>{})),
                   make_unmerge_transform(
                       make_tuple(Number<MNXdlPerWave>{}, Number<MNWaves>{}, Number<MNPerXdl>{}))),
        make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
        make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
}
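
// Example (illustrative, values assumed): a (K0, MN, K1) = (4, 64, 8) tile descriptor with
// MNXdlPerWave = 2, MNWaves = 2, MNPerXdl = 16 is reinterpreted as
// (MN0, MN1, MN2, K) = (2, 2, 16, 32): K merges K0 and K1 (4 * 8 = 32), while MN = 64
// unmerges into 2 * 2 * 16.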

template <index_t BlockSize,
          typename FloatA,
          typename FloatB,
          typename FloatAcc,
          typename AK0MK1BlockDesc,
          typename BK0NK1BlockDesc,
          index_t MPerXDL,
          index_t NPerXDL,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPack,
          typename ComputeTypeA = FloatA,
          typename ComputeTypeB = FloatB>
struct BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1
{
    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
    static constexpr auto I3 = Number<3>{};

    using ThisThreadBlock = ThisThreadBlock<BlockSize>;

    static constexpr index_t WaveSize = get_warp_size();

    static constexpr index_t MPerBlock = AK0MK1BlockDesc{}.GetLength(I1);
    static constexpr index_t NPerBlock = BK0NK1BlockDesc{}.GetLength(I1);
    static constexpr index_t KPerBlock =
        BK0NK1BlockDesc{}.GetLength(I0) * BK0NK1BlockDesc{}.GetLength(I2);

    static constexpr index_t A_K0 = AK0MK1BlockDesc{}.GetLength(I0);
    static constexpr index_t B_K0 = BK0NK1BlockDesc{}.GetLength(I0);
    static constexpr index_t A_K1 = AK0MK1BlockDesc{}.GetLength(I2);
    static constexpr index_t B_K1 = BK0NK1BlockDesc{}.GetLength(I2);

    static constexpr auto xdlops_gemm =
        SmfmacXdlopsGemm<ComputeTypeA, MPerXDL, NPerXDL, KPack, ComputeTypeB>{};

    static constexpr index_t KPerThread = KPerBlock / xdlops_gemm.K0PerXdlops;

    static constexpr index_t MWaves = MPerBlock / (MRepeat * MPerXDL);
    static constexpr index_t NWaves = NPerBlock / (NRepeat * NPerXDL);

    StaticBufferTupleOfVector<AddressSpaceEnum::Vgpr,
                              FloatAcc,
                              MRepeat * NRepeat,
                              xdlops_gemm.GetRegSizePerXdlops(),
                              true>
        c_thread_buf_;

    __host__ __device__ constexpr auto& GetCThreadBuffer() { return c_thread_buf_; }

    __device__ static auto GetWaveIdx()
    {
        const index_t thread_id = ThisThreadBlock::GetThreadId();

        constexpr auto threadid_to_wave_idx_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(MWaves, NWaves, WaveSize))),
            make_tuple(Sequence<0, 1, 2>{}),
            make_tuple(Sequence<0>{}));

        return threadid_to_wave_idx_adaptor.CalculateBottomIndex(make_multi_index(thread_id));
    }
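
    // Example (illustrative): with MWaves = 2, NWaves = 2 and WaveSize = 64, thread 200 maps
    // to waveId_m = 200 / 128 = 1, waveId_n = (200 / 64) % 2 = 1, lane = 200 % 64 = 8.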

    __device__ static auto CalculateAThreadOriginDataIndex()
    {
        const auto wave_idx     = GetWaveIdx();
        const auto waveId_m     = wave_idx[I0];
        const auto xdlops_a_idx = xdlops_gemm.CalculateAThreadOriginDataIndex();

        return make_tuple(0, waveId_m, xdlops_a_idx[I1], KPerThread * xdlops_a_idx[I0]);
    }

    __device__ static auto CalculateBThreadOriginDataIndex()
    {
        const auto wave_idx     = GetWaveIdx();
        const auto waveId_n     = wave_idx[I1];
        const auto xdlops_b_idx = xdlops_gemm.CalculateBThreadOriginDataIndex();

        return make_tuple(0, waveId_n, xdlops_b_idx[I1], KPerThread * xdlops_b_idx[I0]);
    }

    template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
    __device__ static auto
    CalculateCThreadOriginDataIndex(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
    {
        const auto wave_idx = GetWaveIdx();
        const auto waveId_m = wave_idx[I0];
        const auto waveId_n = wave_idx[I1];

        const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk(xdlops_i, blk_i);

        constexpr auto mrepeat_mwave_mperxdl_to_m_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(MRepeat, MWaves, MPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        constexpr auto nrepeat_nwave_nperxdl_to_n_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(NRepeat, NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        const index_t c_thread_m = mrepeat_mwave_mperxdl_to_m_adaptor.CalculateBottomIndex(
            make_tuple(m0, waveId_m, blk_idx[I0]))[I0];
        const index_t c_thread_n = nrepeat_nwave_nperxdl_to_n_adaptor.CalculateBottomIndex(
            make_tuple(n0, waveId_n, blk_idx[I1]))[I0];

        return make_tuple(c_thread_m, c_thread_n);
    }

    template <index_t m0, index_t n0, index_t xdlops_i, index_t blk_i>
    __device__ static auto
    CalculateCThreadOriginDataIndex8D(Number<m0>, Number<n0>, Number<xdlops_i>, Number<blk_i>)
    {
        const auto wave_idx = GetWaveIdx();
        const auto waveId_m = wave_idx[I0];
        const auto waveId_n = wave_idx[I1];

        const auto blk_idx = xdlops_gemm.GetBeginOfThreadBlk4D(xdlops_i, blk_i);

        return make_tuple(Number<m0>{},
                          Number<n0>{},
                          waveId_m,
                          waveId_n,
                          blk_idx[I0],
                          blk_idx[I1],
                          blk_idx[I2],
                          blk_idx[I3]);
    }

    __host__ __device__ BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1()
    {
        static_assert(AK0MK1BlockDesc::IsKnownAtCompileTime() &&
                          BK0NK1BlockDesc::IsKnownAtCompileTime(),
                      "wrong! Desc should be known at compile-time");

        static_assert(ThisThreadBlock::GetNumOfThread() == MWaves * NWaves * WaveSize,
                      "ThisThreadBlock::GetNumOfThread() != MWaves * NWaves * WaveSize\n");

        static_assert(MPerBlock % (MPerXDL * MRepeat) == 0,
                      "MPerBlock must be divisible by MPerXDL * MRepeat");
        static_assert(NPerBlock % (NPerXDL * NRepeat) == 0,
                      "NPerBlock must be divisible by NPerXDL * NRepeat");

        static_assert(
            KPack % (16 * sizeof(ComputeTypeA)) == 0,
            "KPack must be divisible by number of elements processed in single smfmac instruction");
    }
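
    // Example (illustrative): for ComputeTypeA = half_t, one smfmac instruction processes
    // 16 * sizeof(ComputeTypeA) = 32 K-elements, so the assertion above requires KPack to be
    // a multiple of 32; for 1-byte input types it requires a multiple of 16.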

    __host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    __host__ __device__ static constexpr auto GetCThreadDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_m0_m1_m2_n_tblk_lens = xdlops_gemm.GetCM0M1M2NThreadBlkLengths();

        constexpr auto M0 = c_m0_m1_m2_n_tblk_lens[I0];
        constexpr auto M1 = c_m0_m1_m2_n_tblk_lens[I1];
        constexpr auto M2 = c_m0_m1_m2_n_tblk_lens[I2];
        constexpr auto N  = c_m0_m1_m2_n_tblk_lens[I3];

        return make_naive_tensor_descriptor_packed(
            make_tuple(I1, Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M0, M1, M2, N));
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_block_desc_m0_n0_m1_n1_m2_n2);
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2()
    {
        constexpr auto c_block_desc_g_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(I1,
                                                           Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerXDL>{},
                                                           Number<NPerXDL>{}));

        return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
            c_block_desc_g_m0_n0_m1_n1_m2_n2);
    }

    template <typename CGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_M_N& c_grid_desc_m_n)
    {
        const auto M = c_grid_desc_m_n.GetLength(I0);
        const auto N = c_grid_desc_m_n.GetLength(I1);

        const auto c_grid_desc_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_m_n,
            make_tuple(make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0, 1, 2>{}, Sequence<3, 4, 5>{}));

        return xdlops_gemm.MakeCDescriptor_M0_N0_M1_N1_M2_M3_M4_N2(c_grid_desc_m0_n0_m1_n1_m2_n2);
    }

    template <typename CGridDesc_G_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(const CGridDesc_G_M_N& c_grid_desc_g_m_n)
    {
        const auto G = c_grid_desc_g_m_n.GetLength(I0);
        const auto M = c_grid_desc_g_m_n.GetLength(I1);
        const auto N = c_grid_desc_g_m_n.GetLength(I2);

        const auto c_grid_desc_g_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_g_m_n,
            make_tuple(make_pass_through_transform(G),
                       make_unmerge_transform(make_tuple(M / (MWaves * MPerXDL), MWaves, MPerXDL)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerXDL), NWaves, NPerXDL))),
            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}),
            make_tuple(Sequence<0>{}, Sequence<1, 2, 3>{}, Sequence<4, 5, 6>{}));

        return xdlops_gemm.MakeCDescriptor_G_M0_N0_M1_N1_M2_M3_M4_N2(
            c_grid_desc_g_m0_n0_m1_n1_m2_n2);
    }

    __host__ __device__ static constexpr auto MakeABlockDescriptor_M0_M1_M2_K()
    {
        return transform_tensor_descriptor(
            AK0MK1BlockDesc{},
            make_tuple(
                make_merge_transform_v3_division_mod(make_tuple(Number<A_K0>{}, Number<A_K1>{})),
                make_unmerge_transform(
                    make_tuple(Number<MRepeat>{}, Number<MWaves>{}, Number<MPerXDL>{}))),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
            make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
    }

    __host__ __device__ static constexpr auto MakeBBlockDescriptor_N0_N1_N2_K()
    {
        return transform_tensor_descriptor(
            BK0NK1BlockDesc{},
            make_tuple(
                make_merge_transform_v3_division_mod(make_tuple(Number<B_K0>{}, Number<B_K1>{})),
                make_unmerge_transform(
                    make_tuple(Number<NRepeat>{}, Number<NWaves>{}, Number<NPerXDL>{}))),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
            make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
    }

    static constexpr auto a_block_desc_m0_m1_m2_k = MakeABlockDescriptor_M0_M1_M2_K();
    static constexpr auto b_block_desc_n0_n1_n2_k = MakeBBlockDescriptor_N0_N1_N2_K();

    // Prepares data in a_thread_buf by squeezing values (omitting zeros) to match 2:4
    // structural sparsity. The indices of the non-zero elements are stored in idx_buf and
    // used later by the smfmac instruction.
    template <typename AThreadBuf, typename IdxBuf, int32_t num_elems>
    __device__ void SetIdxSqueezeA(AThreadBuf& a_thread_buf, IdxBuf& idx_buf)
    {
        static constexpr int32_t bit_clear_masks[4] = {0b11, 0b1100, 0b110000, 0b11000000};
        static constexpr int32_t processed_elems    = 16 / sizeof(ComputeTypeA);

        static_for<0, num_elems, processed_elems>{}([&](auto i) {
            constexpr int idx_reg_num  = i / (16 * sizeof(ComputeTypeA));
            constexpr int idx_reg_part = (i % 32) / processed_elems;

            vector_type<ComputeTypeA, processed_elems> a_thread_vec;
            static_for<0, processed_elems, 1>{}([&](auto j) {
                a_thread_vec.template AsType<ComputeTypeA>()(j) = a_thread_buf
                    [Number<a_thread_desc_.CalculateOffset(make_tuple(0, 0, 0, i + j))>{}];
            });

            uint8_t idx = 0b11101110; // set to last 2 elems for both 4-elem subgroups by default
            for(int j = 0; j < processed_elems; j += 4)
            {
                int32_t a_pos       = idx_reg_part * processed_elems + j;
                int32_t nonzero_pos = 0;
                ComputeTypeA nonzero_elems[2] = {a_thread_vec[j + 2], a_thread_vec[j + 3]};
                for(int k = 0; k < 3; k += 1)
                {
                    if(a_thread_vec[j + k] != 0.0f)
                    {
                        nonzero_elems[nonzero_pos] = a_thread_vec[j + k];
                        idx &= ~bit_clear_masks[j / 2 + nonzero_pos];
                        idx |= k << 2 * (j / 2 + nonzero_pos);
                        ++nonzero_pos;
                    }
                }
                a_thread_vec[j / 2]     = nonzero_elems[0];
                a_thread_vec[j / 2 + 1] = nonzero_elems[1];
            }
            idx_buf[Number<idx_reg_num>{}].template AsType<int8x4_t>()[Number<idx_reg_part>{}] =
                idx;

            static_for<0, processed_elems / 2, 1>{}([&](auto j) {
                a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                    make_tuple(0, 0, 0, i / 2 + j))>{}] = a_thread_vec[j];
            });
        });
    }
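
    // Worked example (illustrative): for one 4-element group {a0, 0, a2, 0} held by a thread,
    // the scan above keeps a0 (source position 0) in slot 0 and a2 (source position 2) in
    // slot 1, so the compressed pair is {a0, a2} and the group's idx nibble becomes 0b1000
    // (slot 0 bits = 00, slot 1 bits = 10). Note the loop only scans positions 0..2; position
    // 3 is covered by the preloaded defaults {a[j+2], a[j+3]} and the initial nibble 0b1110.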

    template <typename ABlockBuffer, typename BBlockBuffer, typename CThreadBuffer>
    __device__ void Run(const ABlockBuffer& a_block_buf,
                        const BBlockBuffer& b_block_buf,
                        CThreadBuffer& c_thread_buf) const
    {
        auto a_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ComputeTypeA>(
            a_thread_desc_.GetElementSpaceSize());
        auto b_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ComputeTypeB>(
            b_thread_desc_.GetElementSpaceSize());
        static constexpr int32_t elems_per_idx = 16 * sizeof(ComputeTypeA);
        auto idx_buf = make_static_buffer<AddressSpaceEnum::Vgpr, int32_t>(
            (a_thread_desc_.GetElementSpaceSize() + elems_per_idx - 1) / elems_per_idx);

        static_for<0, MRepeat, 1>{}([&](auto m0) {
            // read A
            a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                               make_tuple(m0, I0, I0, I0),
                               a_block_buf,
                               a_thread_desc_,
                               make_tuple(I0, I0, I0, I0),
                               a_thread_buf);

            SetIdxSqueezeA<decltype(a_thread_buf),
                           decltype(idx_buf),
                           a_thread_desc_.GetElementSpaceSize()>(a_thread_buf, idx_buf);

            static_for<0, NRepeat, 1>{}([&](auto n0) {
                // read B
                b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
                                   make_tuple(n0, I0, I0, I0),
                                   b_block_buf,
                                   b_thread_desc_,
                                   make_tuple(I0, I0, I0, I0),
                                   b_thread_buf);

                static_for<0, KPerThread, KPack>{}([&](auto k) {
                    // a_thread_vec is smaller because it's structurally sparse 2:4
                    vector_type<ComputeTypeA, KPack / 2> a_thread_vec;
                    vector_type<ComputeTypeB, KPack> b_thread_vec;
                    vector_type<int32_t, KPack / elems_per_idx> idx_vec;

                    static_for<0, KPack / 2, 1>{}([&](auto i) {
                        a_thread_vec.template AsType<ComputeTypeA>()(i) =
                            a_thread_buf[Number<a_thread_desc_.CalculateOffset(
                                make_tuple(0, 0, 0, k / 2 + i))>{}];
                    });

                    static_for<0, KPack, 1>{}([&](auto i) {
                        b_thread_vec.template AsType<ComputeTypeB>()(i) = b_thread_buf
                            [Number<b_thread_desc_.CalculateOffset(make_tuple(0, 0, 0, k + i))>{}];
                    });

                    static_for<0, KPack / elems_per_idx, 1>{}([&](auto i) {
                        idx_vec.template AsType<int32_t>()(i) =
                            idx_buf[Number<k / elems_per_idx + i>{}];
                    });

                    // A is smaller because it's structurally sparse 2:4
                    using mfma_input_type_a =
                        typename vector_type<ComputeTypeA, xdlops_gemm.K1PerXdlops / 2>::type;
                    using mfma_input_type_b =
                        typename vector_type<ComputeTypeB, xdlops_gemm.K1PerXdlops>::type;
                    using mfma_input_type_idx = typename vector_type<int32_t, 1>::type;

                    constexpr index_t c_offset =
                        c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                    xdlops_gemm.Run(a_thread_vec.template AsType<mfma_input_type_a>(),
                                    b_thread_vec.template AsType<mfma_input_type_b>(),
                                    idx_vec.template AsType<mfma_input_type_idx>(),
                                    c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                });
            });
        });
    }
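
    // Shape walk-through (illustrative, f16 with KPack = 32): in each (m0, n0, k) step a
    // thread feeds xdlops_gemm.Run() KPack / 2 = 16 compressed A elements, KPack = 32 dense B
    // elements and KPack / elems_per_idx = 1 packed index register, accumulating into the
    // (m0, n0) slice of c_thread_buf.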

    protected:
    // A[M0, M1, M2, KPerThread]
    static constexpr auto a_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(I1, I1, I1, Number<KPerThread>{}));

    // B[N0, N1, N2, KPerThread]
    static constexpr auto b_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(I1, I1, I1, Number<KPerThread>{}));

    // C[M, N, NumRegXdlops]
    static constexpr auto c_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, xdlops_gemm.GetRegSizePerXdlops()));

    using AThreadCopy = ThreadwiseTensorSliceTransfer_v4<FloatA,
                                                         ComputeTypeA,
                                                         decltype(a_block_desc_m0_m1_m2_k),
                                                         decltype(a_thread_desc_),
                                                         Sequence<1, 1, 1, KPerThread>,
                                                         Sequence<0, 1, 2, 3>,
                                                         3,
                                                         A_K1,
                                                         A_K1>;

    using BThreadCopy = ThreadwiseTensorSliceTransfer_v4<FloatB,
                                                         ComputeTypeB,
                                                         decltype(b_block_desc_n0_n1_n2_k),
                                                         decltype(b_thread_desc_),
                                                         Sequence<1, 1, 1, KPerThread>,
                                                         Sequence<0, 1, 2, 3>,
                                                         3,
                                                         B_K1,
                                                         B_K1>;

    AThreadCopy a_thread_copy_{CalculateAThreadOriginDataIndex()};
    BThreadCopy b_thread_copy_{CalculateBThreadOriginDataIndex()};
};

} // namespace ck
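
// Usage sketch (illustrative; the parameter values below are assumptions, not part of this
// header). A 128x128 block tile in f16 with 32x32 XDL tiles and 256 threads satisfies the
// constructor's static_asserts: MWaves = 128 / (2 * 32) = 2, NWaves = 2,
// BlockSize = 2 * 2 * 64 = 256, and KPack = 32 is a multiple of 16 * sizeof(half_t) = 32.
//
//   using ABlockDesc = decltype(ck::make_naive_tensor_descriptor_packed(
//       ck::make_tuple(ck::Number<4>{}, ck::Number<128>{}, ck::Number<8>{}))); // (K0, M, K1)
//   using BBlockDesc = decltype(ck::make_naive_tensor_descriptor_packed(
//       ck::make_tuple(ck::Number<4>{}, ck::Number<128>{}, ck::Number<8>{}))); // (K0, N, K1)
//
//   using BlockwiseGemm =
//       ck::BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_v1<256,        // BlockSize
//                                                               ck::half_t, // FloatA
//                                                               ck::half_t, // FloatB
//                                                               float,      // FloatAcc
//                                                               ABlockDesc,
//                                                               BBlockDesc,
//                                                               32,   // MPerXDL
//                                                               32,   // NPerXDL
//                                                               2,    // MRepeat
//                                                               2,    // NRepeat
//                                                               32>;  // KPack
//
//   // In a kernel body, with A/B tiles staged in LDS:
//   //   BlockwiseGemm blockwise_gemm;
//   //   blockwise_gemm.Run(a_block_buf, b_block_buf, blockwise_gemm.GetCThreadBuffer());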