Composable Kernel: include/ck/tensor_operation/gpu/grid/gridwise_gemm_multiple_d_xdl_cshuffle.hpp Source File
1 // SPDX-License-Identifier: MIT
2 // Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
3 
4 #pragma once
5 
17 
20 
21 namespace ck {
22 
23 // GEMM:
24 // input : A[M, K]
25 // input : B[N, K]
26 // input : D0[M, N], D1[M, N], ...
27 // output : E[M, N]
28 // C = a_op(A) * b_op(B)
29 // E = cde_op(C, D0, D1, ...)
30 // Assume:
31 // D0, D1, ... and E have the same layout
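// Illustrative example (not part of the original header): assuming two D tensors and
// an "add + add + activation"-style CDEElementwiseOperation, each output element is
//   C[m, n] = a_op(A[m, :]) dot b_op(B[:, n])       (accumulated in AccDataType)
//   E[m, n] = cde_op(C[m, n], D0[m, n], D1[m, n])   (e.g. act(C + D0 + D1))
// The D tensors are tiled exactly like E, which is why they must share E's layout.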
32 template <typename ADataType,
33  typename BDataType,
34  typename AComputeDataType_,
35  typename AccDataType,
36  typename CShuffleDataType,
37  typename DsDataType,
38  typename EDataType,
39  typename AElementwiseOperation,
40  typename BElementwiseOperation,
41  typename CDEElementwiseOperation,
42  InMemoryDataOperationEnum EGlobalMemoryDataOperation,
43  index_t NumGemmKPrefetchStage,
44  index_t BlockSize,
45  index_t MPerBlock,
46  index_t NPerBlock,
47  index_t KPerBlock,
48  index_t AK1Value,
49  index_t BK1Value,
50  index_t MPerXdl,
51  index_t NPerXdl,
52  index_t MXdlPerWave,
53  index_t NXdlPerWave,
54  typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
55  typename ABlockTransferThreadClusterArrangeOrder,
56  typename ABlockTransferSrcAccessOrder,
57  index_t ABlockTransferSrcVectorDim,
58  index_t ABlockTransferSrcScalarPerVector,
59  index_t ABlockTransferDstScalarPerVector_AK1,
60  bool AThreadTransferSrcResetCoordinateAfterRun,
61  index_t ABlockLdsExtraM,
62  typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
63  typename BBlockTransferThreadClusterArrangeOrder,
64  typename BBlockTransferSrcAccessOrder,
65  index_t BBlockTransferSrcVectorDim,
66  index_t BBlockTransferSrcScalarPerVector,
67  index_t BBlockTransferDstScalarPerVector_BK1,
68  bool BThreadTransferSrcResetCoordinateAfterRun,
69  index_t BBlockLdsExtraN,
70  index_t CShuffleMXdlPerWavePerShuffle,
71  index_t CShuffleNXdlPerWavePerShuffle,
72  typename CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
73  index_t CDEShuffleBlockTransferScalarPerVector_NPerBlock,
74  LoopScheduler LoopSched,
76  typename BComputeDataType_ = AComputeDataType_>
78 {
79  static constexpr index_t NumDTensor = DsDataType::Size();
80 
81  using GemmSpecialization = ck::tensor_operation::device::GemmSpecialization;
82 
83  static constexpr auto I0 = Number<0>{};
84  static constexpr auto I1 = Number<1>{};
85  static constexpr auto I2 = Number<2>{};
86  static constexpr auto I3 = Number<3>{};
87  static constexpr auto I4 = Number<4>{};
88  static constexpr auto I5 = Number<5>{};
89  static constexpr auto I6 = Number<6>{};
90  static constexpr auto I7 = Number<7>{};
91 
92  // K1 should be Number<...>
93  static constexpr auto AK1 = Number<AK1Value>{};
94  static constexpr auto BK1 = Number<BK1Value>{};
95  static constexpr auto AK0PerBlock = Number<KPerBlock / AK1Value>{};
96  static constexpr auto BK0PerBlock = Number<KPerBlock / BK1Value>{};
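// For example (hypothetical tuning values, not defaults): KPerBlock = 32 with
// AK1Value = 8 gives AK0PerBlock = 4, i.e. the A tile is staged in LDS as a
// [AK0PerBlock, MPerBlock, AK1] = [4, MPerBlock, 8] view, where AK1 is the
// contiguous-K chunk used for vectorized accesses.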
97 
98  using ThisThreadBlock = ThisThreadBlock<BlockSize>;
99 
100  using GridwiseGemmPipe = remove_cvref_t<
101  decltype(GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>())>;
102 
103 #if CK_GFX90A_DENORM_WORKAROUND
104  using AComputeDataType =
106  using BComputeDataType =
108 #else
109  using AComputeDataType = AComputeDataType_;
110  using BComputeDataType = BComputeDataType_;
111 #endif
112 
113  __host__ __device__ static constexpr auto GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1()
114  {
115  // A matrix in LDS memory, dst of blockwise copy
119  }
120 
121  __host__ __device__ static constexpr auto GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1()
122  {
123  // B matrix in LDS memory, dst of blockwise copy
127  }
128 
129  __host__ __device__ static constexpr auto
131  {
132  constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
133  constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);
134 
135  constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
137  make_tuple(I1,
139  I1,
141 
142  return c_shuffle_block_desc_mblock_mperblock_nblock_nperblock;
143  }
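// For example (hypothetical values): MPerBlock = 256, MXdlPerWave = 4, MPerXdl = 32
// gives MWave = 2 waves along M. The shuffle buffer only covers
// CShuffleMXdlPerWavePerShuffle x CShuffleNXdlPerWavePerShuffle XDL tiles per pass,
// which keeps its LDS footprint small.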
144 
145  // ck::Tuple<const D0DataType*, const D1DataType*, ...>
146  static constexpr auto MakeDsGridPointer()
147  {
148  return generate_tuple(
149  [&](auto i) {
150  using DDataType = remove_cvref_t<tuple_element_t<i.value, DsDataType>>;
151 
152  return static_cast<const DDataType*>(nullptr);
153  },
155  }
156 
157  __host__ __device__ static constexpr index_t GetSharedMemoryNumberOfByte()
158  {
159  // LDS allocation for A and B: be careful of alignment
160  constexpr auto a_block_desc_ak0_m_ak1 = GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1();
161  constexpr auto b_block_desc_bk0_n_bk1 = GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1();
162 
163  // lds max alignment
164  constexpr auto max_lds_align = math::lcm(AK1, BK1);
165 
166  constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
167  a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);
168 
169  constexpr auto b_block_space_size_aligned = math::integer_least_multiple(
170  b_block_desc_bk0_n_bk1.GetElementSpaceSize(), max_lds_align);
171 
172  // LDS allocation for C shuffle in LDS
173  constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
175 
176  constexpr auto c_block_size =
177  c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize();
178 
179  return math::max(a_block_space_size_aligned * sizeof(AComputeDataType) +
180  b_block_space_size_aligned * sizeof(BComputeDataType),
181  c_block_size * sizeof(CShuffleDataType));
182  }
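// Rough sizing sketch (hypothetical fp16 tile, ignoring ABlockLdsExtraM /
// BBlockLdsExtraN padding): MPerBlock = 256, NPerBlock = 128, KPerBlock = 32 needs
// 256*32*2 B + 128*32*2 B = 24 KiB for the A and B tiles. The C-shuffle staging
// buffer reuses the same LDS after the main K loop, hence the max(...) above
// instead of a sum.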
183 
184  // A desc for source in blockwise copy
185  template <typename AGridDesc_M_K>
186  __host__ __device__ static constexpr auto
187  MakeDefaultAGridDescriptor_AK0_M_AK1(const AGridDesc_M_K& a_grid_desc_m_k)
188  {
189  const auto M = a_grid_desc_m_k.GetLength(I0);
190  const auto K = a_grid_desc_m_k.GetLength(I1);
191 
192  const auto AK0 = K / AK1;
193 
194  return transform_tensor_descriptor(a_grid_desc_m_k,
199  }
200 
201  // B desc for source in blockwise copy
202  template <typename BGridDesc_N_K>
203  __host__ __device__ static constexpr auto
204  MakeDefaultBGridDescriptor_BK0_N_BK1(const BGridDesc_N_K& b_grid_desc_n_k)
205  {
206  const auto N = b_grid_desc_n_k.GetLength(I0);
207  const auto K = b_grid_desc_n_k.GetLength(I1);
208 
209  const auto BK0 = K / BK1;
210 
211  return transform_tensor_descriptor(b_grid_desc_n_k,
216  }
217 
218  // E desc for destination in blockwise copy
219  template <typename EGridDesc_M_N>
220  __host__ __device__ static constexpr auto
221  MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const EGridDesc_M_N& e_grid_desc_m_n)
222  {
223  const auto M = e_grid_desc_m_n.GetLength(I0);
224  const auto N = e_grid_desc_m_n.GetLength(I1);
225 
226  const auto MBlock = M / MPerBlock;
227  const auto NBlock = N / NPerBlock;
228 
229  const auto e_grid_desc_mblock_mperblock_nblock_nperblock = transform_tensor_descriptor(
230  e_grid_desc_m_n,
235 
236  return e_grid_desc_mblock_mperblock_nblock_nperblock;
237  }
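// For example (hypothetical sizes): M = 1024, N = 512 with MPerBlock = 256,
// NPerBlock = 128 is viewed as (MBlock, MPerBlock, NBlock, NPerBlock) =
// (4, 256, 4, 128), so each workgroup addresses E through its (mblock, nblock)
// coordinate plus in-tile offsets.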
238 
239  // Ds desc for source in blockwise copy
240  template <typename DsGridDesc_M_N>
241  __host__ __device__ static constexpr auto
242  MakeDsGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const DsGridDesc_M_N& ds_grid_desc_m_n)
243  {
244  return generate_tuple(
245  [&](auto i) {
246  return MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n[i]);
247  },
249  }
250 
251  // return block_id to E matrix tile idx (m0, n0) mapping
252  template <typename EGridDesc_M_N>
253  __host__ __device__ static constexpr auto
254  MakeDefaultBlock2ETileMap(const EGridDesc_M_N& e_grid_desc_m_n)
255  {
257  e_grid_desc_m_n);
258  }
259 
260  template <typename ALayout, typename BLayout, typename ELayout>
261  __host__ __device__ static bool
263  {
264  // Check if the vector dim is K1 or M|N
265  const auto A_vector_dim_size = ABlockTransferSrcVectorDim == 2 ? KRaw : MRaw;
266  const auto B_vector_dim_size = BBlockTransferSrcVectorDim == 2 ? KRaw : NRaw;
267  const auto E_vector_dim_size = NRaw;
268 
269  // check vector load for A tensor
270  if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
271  {
272  if(!(A_vector_dim_size == KRaw &&
273  A_vector_dim_size % ABlockTransferSrcScalarPerVector == 0))
274  return false;
275  }
276  else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
277  {
278  if(!(A_vector_dim_size == MRaw &&
279  A_vector_dim_size % ABlockTransferSrcScalarPerVector == 0))
280  return false;
281  }
282  else
283  {
284  return false;
285  }
286 
287  if constexpr(is_same_v<tensor_layout::gemm::RowMajor, BLayout>)
288  {
289  if(!(B_vector_dim_size == NRaw &&
290  B_vector_dim_size % BBlockTransferSrcScalarPerVector == 0))
291  return false;
292  }
293  else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, BLayout>)
294  {
295  if(!(B_vector_dim_size == KRaw &&
296  B_vector_dim_size % BBlockTransferSrcScalarPerVector == 0))
297  return false;
298  }
299  else
300  {
301  return false;
302  }
303 
304  if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ELayout>)
305  {
306  if(!(E_vector_dim_size == NRaw &&
307  E_vector_dim_size % CDEShuffleBlockTransferScalarPerVector_NPerBlock == 0))
308  return false;
309  }
310  else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ELayout>)
311  {
312  if(!(E_vector_dim_size == NRaw &&
313  CDEShuffleBlockTransferScalarPerVector_NPerBlock == 1))
314  return false;
315  }
316  else
317  {
318  return false;
319  }
320 
321  return true;
322  }
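// For example: a row-major A (K contiguous) passes only if the source vector
// dimension is K (ABlockTransferSrcVectorDim == 2) and
// KRaw % ABlockTransferSrcScalarPerVector == 0, while a column-major E is only
// accepted with CDEShuffleBlockTransferScalarPerVector_NPerBlock == 1, because the
// store vectorization runs along N but N is not contiguous in that layout.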
323 
324  template <typename AGridDesc_M_K,
325  typename BGridDesc_N_K,
326  typename DsGridDesc_M_N,
327  typename EGridDesc_M_N,
328  typename Block2ETileMap>
329  __host__ __device__ static constexpr bool CheckValidity(const AGridDesc_M_K& a_grid_desc_m_k,
330  const BGridDesc_N_K& b_grid_desc_n_k,
331  const DsGridDesc_M_N& ds_grid_desc_m_n,
332  const EGridDesc_M_N& e_grid_desc_m_n,
333  [[maybe_unused]] const Block2ETileMap&)
334  {
335  static_assert((MPerBlock % (MPerXdl * MXdlPerWave) == 0) &&
336  (NPerBlock % (NXdlPerWave * NPerXdl)) == 0,
337  "Invalid tuning param!");
338  static_assert(KPerBlock % AK1Value == 0 && KPerBlock % BK1Value == 0,
339  "KPerBlock must be divisible by AK1Value and BK1Value!");
340 
341  const auto M = a_grid_desc_m_k.GetLength(I0);
342  const auto N = b_grid_desc_n_k.GetLength(I0);
343  const auto AK = a_grid_desc_m_k.GetLength(I1);
344  const auto BK = b_grid_desc_n_k.GetLength(I1);
345 
346  // check consistency of desc
347  if(!(M == e_grid_desc_m_n.GetLength(I0) && N == e_grid_desc_m_n.GetLength(I1) && AK == BK))
348  {
349  return false;
350  }
351  bool valid = true;
352 
353  static_for<0, NumDTensor, 1>{}([&](auto i) {
354  valid = valid && (M == ds_grid_desc_m_n[i].GetLength(I0) &&
355  N == ds_grid_desc_m_n[i].GetLength(I1));
356  });
357 
358  if(!valid)
359  {
360  return false;
361  }
362 
363  // check tile size
364  if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && AK % KPerBlock == 0))
365  {
366  return false;
367  }
368 
369  // check gridwise gemm pipeline
370  const auto num_k_loop = AK / KPerBlock;
371  if(!GridwiseGemmPipe::IsSupported(num_k_loop))
372  {
373  return false;
374  }
375 
376  // check block-to-E-tile
377  // if(!block_2_etile_map.CheckValidity(e_grid_desc_m_n))
378  //{
379  // return false;
380  //}
381 
382  // TODO: also check validity of all components (blockwise-copy, threadwise-copy, etc)
383  // check tensor size: cannot be larger than 2GB each
384  constexpr long_index_t TwoGB = (long_index_t{1} << 31);
385 
386  if(!(a_grid_desc_m_k.GetElementSpaceSize() * sizeof(ADataType) <= TwoGB &&
387  b_grid_desc_n_k.GetElementSpaceSize() * sizeof(BDataType) <= TwoGB &&
388  e_grid_desc_m_n.GetElementSpaceSize() * sizeof(EDataType) <= TwoGB))
389  {
390  return false;
391  }
392 
393  return true;
394  }
395 
396  __host__ __device__ static constexpr bool CalculateHasMainKBlockLoop(index_t K)
397  {
398  const index_t num_loop = K / KPerBlock;
399 
400  return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
401  }
402 
403  using DsGridPointer = decltype(MakeDsGridPointer());
404 
405  template <typename ALayout, GemmSpecialization GemmSpec>
406  __host__ __device__ static auto
408  {
409  constexpr auto matrix_padder =
411  MPerBlock, NPerBlock, KPerBlock};
412 
413  const auto a_grid_desc_mraw_kraw = [&]() {
414  if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
415  {
416  return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw),
417  make_tuple(StrideA, I1));
418  }
419  else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
420  {
421  return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw),
422  make_tuple(I1, StrideA));
423  }
424  }();
425 
426  return matrix_padder.PadADescriptor_M_K(a_grid_desc_mraw_kraw);
427  }
428 
429  template <typename BLayout, GemmSpecialization GemmSpec>
430  __host__ __device__ static auto
432  {
433  constexpr auto matrix_padder =
435  MPerBlock, NPerBlock, KPerBlock};
436 
437  const auto b_grid_desc_nraw_kraw = [&]() {
439  {
440  return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw),
441  make_tuple(I1, StrideB));
442  }
444  {
445  return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw),
446  make_tuple(StrideB, I1));
447  }
448  }();
449 
450  return matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw);
451  }
452 
453  template <typename ELayout, GemmSpecialization GemmSpec>
454  __host__ __device__ static auto
456  {
457  constexpr auto matrix_padder =
459  MPerBlock, NPerBlock, KPerBlock};
460  const auto e_grid_desc_mraw_nraw = [&]() {
462  {
463  return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw),
464  make_tuple(StrideE, I1));
465  }
467  {
468  return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw),
469  make_tuple(I1, StrideE));
470  }
471  }();
472 
473  return matrix_padder.PadCDescriptor_M_N(e_grid_desc_mraw_nraw);
474  }
475 
476 #ifdef CK_CODE_GEN_RTC
477  template <typename DsLayout, GemmSpecialization GemmSpec>
478  __host__ __device__ static auto
480  const ck::Array<index_t, NumDTensor>& NRaws,
481  const ck::Array<index_t, NumDTensor>& DsStride)
482 #else
483  template <typename DsLayout, GemmSpecialization GemmSpec>
484  __host__ __device__ static auto
485  MakeDsGridDescriptor_M_N(const std::array<index_t, NumDTensor>& MRaws,
486  const std::array<index_t, NumDTensor>& NRaws,
487  const std::array<index_t, NumDTensor>& DsStride)
488 #endif
489  {
490  return generate_tuple(
491  [&](auto i) {
492  using DLayout = remove_cvref_t<tuple_element_t<i.value, DsLayout>>;
493 
494  return MakeEGridDescriptor_M_N<DLayout, GemmSpec>(MRaws[i], NRaws[i], DsStride[i]);
495  },
497  }
498 
499  __device__ __host__ static constexpr auto GetMPerBlock() { return MPerBlock; }
500 
501  template <bool HasMainKBlockLoop,
502  typename AGridDesc_AK0_M_AK1,
503  typename BGridDesc_BK0_N_BK1,
504  typename DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
505  typename EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
506  typename Block2ETileMap>
507  __device__ static void Run(const ADataType* __restrict__ p_a_grid,
508  const BDataType* __restrict__ p_b_grid,
509  DsGridPointer p_ds_grid,
510  EDataType* __restrict__ p_e_grid,
511  void* __restrict__ p_shared,
512  const AElementwiseOperation& a_element_op,
513  const BElementwiseOperation& b_element_op,
514  const CDEElementwiseOperation& cde_element_op,
515  const AGridDesc_AK0_M_AK1& a_grid_desc_ak0_m_ak1,
516  const BGridDesc_BK0_N_BK1& b_grid_desc_bk0_n_bk1,
517  const DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock&
518  ds_grid_desc_mblock_mperblock_nblock_nperblock,
519  const EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock&
520  e_grid_desc_mblock_mperblock_nblock_nperblock,
521  const Block2ETileMap& block_2_etile_map)
522  {
523  const auto a_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
524  p_a_grid, a_grid_desc_ak0_m_ak1.GetElementSpaceSize());
525 
526  const auto b_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
527  p_b_grid, b_grid_desc_bk0_n_bk1.GetElementSpaceSize());
528 
529  const auto ds_grid_buf = generate_tuple(
530  [&](auto i) {
531  return make_dynamic_buffer<AddressSpaceEnum::Global>(
532  p_ds_grid[i],
533  ds_grid_desc_mblock_mperblock_nblock_nperblock[i].GetElementSpaceSize());
534  },
536 
537  auto e_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
538  p_e_grid, e_grid_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
539 
540  // divide block work by [M, N]
541  const auto block_work_idx =
542  block_2_etile_map.CalculateBottomIndex(make_multi_index(get_block_1d_id()));
543 
544  if(!block_2_etile_map.ValidCTileIndex(
545  block_work_idx,
546  make_tuple(e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0),
547  e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2))))
548  {
549  return;
550  }
551 
552  // HACK: this forces m/n_block_data_idx_on_grid into SGPR
553  const index_t m_block_data_idx_on_grid =
554  __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);
555 
556  const index_t n_block_data_idx_on_grid =
557  __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);
558 
559  // lds max alignment
560  constexpr auto max_lds_align = math::lcm(AK1, BK1);
561 
562  // A matrix in LDS memory, dst of blockwise copy
563  constexpr auto a_block_desc_ak0_m_ak1 = GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1();
564 
565  // B matrix in LDS memory, dst of blockwise copy
566  constexpr auto b_block_desc_bk0_n_bk1 = GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1();
567 
568  // A matrix blockwise copy
569  auto a_blockwise_copy =
571  AElementwiseOperation,
575  ABlockTransferThreadClusterLengths_AK0_M_AK1,
576  ABlockTransferThreadClusterArrangeOrder,
577  ADataType,
579  decltype(a_grid_desc_ak0_m_ak1),
580  decltype(a_block_desc_ak0_m_ak1),
581  ABlockTransferSrcAccessOrder,
583  ABlockTransferSrcVectorDim,
584  2,
585  ABlockTransferSrcScalarPerVector,
586  ABlockTransferDstScalarPerVector_AK1,
587  1,
588  1,
589  AThreadTransferSrcResetCoordinateAfterRun,
590  true,
591  NumGemmKPrefetchStage>(
592  a_grid_desc_ak0_m_ak1,
593  make_multi_index(0, m_block_data_idx_on_grid, 0),
594  a_element_op,
595  a_block_desc_ak0_m_ak1,
596  make_multi_index(0, 0, 0),
598 
599  // B matrix blockwise copy
600  auto b_blockwise_copy =
602  BElementwiseOperation,
606  BBlockTransferThreadClusterLengths_BK0_N_BK1,
607  BBlockTransferThreadClusterArrangeOrder,
608  BDataType,
610  decltype(b_grid_desc_bk0_n_bk1),
611  decltype(b_block_desc_bk0_n_bk1),
612  BBlockTransferSrcAccessOrder,
614  BBlockTransferSrcVectorDim,
615  2,
616  BBlockTransferSrcScalarPerVector,
617  BBlockTransferDstScalarPerVector_BK1,
618  1,
619  1,
620  BThreadTransferSrcResetCoordinateAfterRun,
621  true,
622  NumGemmKPrefetchStage>(
623  b_grid_desc_bk0_n_bk1,
624  make_multi_index(0, n_block_data_idx_on_grid, 0),
625  b_element_op,
626  b_block_desc_bk0_n_bk1,
627  make_multi_index(0, 0, 0),
629 
630  // GEMM definition
631  // c_mtx += transpose(a_mtx) * b_mtx
632  // a_mtx[K0PerBlock, MPerBlock] is in LDS
633  // b_mtx[K0PerBlock, NPerBlock] is in LDS
634  // c_mtx[MPerBlock, NPerBlock] is distributed among threads, and saved in
635  // register
636  // sanity check
637  constexpr index_t KPack = math::max(
638  math::lcm(AK1, BK1),
640  .k_per_blk);
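// KPack is the K-length fed to the xdlops (MFMA) pipeline per instruction group:
// e.g. (hypothetical) AK1 = BK1 = 8 with an MFMA variant whose k_per_blk is 4
// yields KPack = max(lcm(8, 8), 4) = 8.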
641 
643  BlockSize,
646  AccDataType,
647  decltype(a_block_desc_ak0_m_ak1),
648  decltype(b_block_desc_bk0_n_bk1),
649  MPerXdl,
650  NPerXdl,
651  MXdlPerWave,
652  NXdlPerWave,
653  KPack,
654  LoopSched>();
655 
656  auto c_thread_buf = blockwise_gemm.GetCThreadBuffer();
657 
658  // LDS allocation for A and B: be careful of alignment
659  constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
660  a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);
661 
662  auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
663  static_cast<AComputeDataType*>(p_shared), a_block_desc_ak0_m_ak1.GetElementSpaceSize());
664 
665  auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
666  static_cast<BComputeDataType*>(p_shared) + a_block_space_size_aligned,
667  b_block_desc_bk0_n_bk1.GetElementSpaceSize());
668 
669  constexpr auto a_block_slice_copy_step = make_multi_index(KPerBlock / AK1, 0, 0);
670  constexpr auto b_block_slice_copy_step = make_multi_index(KPerBlock / BK1, 0, 0);
671 
672  // gridwise GEMM pipeline
673  const auto gridwise_gemm_pipeline =
674  GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>();
675 
676  const index_t num_k_block_main_loop = __builtin_amdgcn_readfirstlane(
677  (a_grid_desc_ak0_m_ak1.GetLength(I0) * a_grid_desc_ak0_m_ak1.GetLength(I2)) /
678  KPerBlock);
679 
680  gridwise_gemm_pipeline.template Run<HasMainKBlockLoop>(a_grid_desc_ak0_m_ak1,
681  a_block_desc_ak0_m_ak1,
682  a_blockwise_copy,
683  a_grid_buf,
684  a_block_buf,
685  a_block_slice_copy_step,
686  b_grid_desc_bk0_n_bk1,
687  b_block_desc_bk0_n_bk1,
688  b_blockwise_copy,
689  b_grid_buf,
690  b_block_buf,
691  b_block_slice_copy_step,
692  blockwise_gemm,
693  c_thread_buf,
694  num_k_block_main_loop);
695 
696  // shuffle C and write out
697  {
698  static_assert(MXdlPerWave % CShuffleMXdlPerWavePerShuffle == 0 &&
699  NXdlPerWave % CShuffleNXdlPerWavePerShuffle == 0,
700  "wrong!");
701 
702  constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
703  constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);
704 
705  // TODO: hacky, fix it!
706  constexpr auto c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2 =
707  blockwise_gemm.GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();
708 
709  // TODO: hacky, fix it!
710  // c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp is only used to get lengths
711  constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp =
712  blockwise_gemm.GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();
713 
714  constexpr auto M0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I0);
715  constexpr auto N0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I1);
716  constexpr auto M1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I2);
717  constexpr auto N1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I3);
718  constexpr auto M2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I4);
719  constexpr auto M3 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I5);
720  constexpr auto M4 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I6);
721  constexpr auto N2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I7);
722 
723  constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
725 
726  auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
727  static_cast<CShuffleDataType*>(p_shared),
728  c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
729 
730  constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2 = transform_tensor_descriptor(
731  c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
732  make_tuple(
735  Number<CShuffleMXdlPerWavePerShuffle>{}, // M0 (MXdlPerWave) per shuffle
736  M1, // M1 = MWave
737  M2, // M2 * M3 * M4 = MPerXdl
738  M3,
739  M4)),
742  Number<CShuffleNXdlPerWavePerShuffle>{}, // N0 (NXdlPerWave) per shuffle
743  N1, // N1 = NWave
744  N2))), // N2 = NPerXdl
746  make_tuple(
748 
749  // calculate origin of thread output tensor on global memory
750  // blockwise GEMM c matrix starting index
751  const auto c_thread_mtx_on_block =
752  blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0, I0, I0);
753 
754  const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
755  const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];
756 
757  const auto m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor =
759  make_tuple(make_merge_transform(make_tuple(M0, M1, M2, M3, M4))),
762 
763  const auto m_thread_data_on_block_idx =
764  m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor.CalculateBottomIndex(
765  make_multi_index(m_thread_data_on_block));
766 
767  const auto n_thread_data_on_block_to_n0_n1_n2_adaptor =
772 
773  const auto n_thread_data_on_block_idx =
774  n_thread_data_on_block_to_n0_n1_n2_adaptor.CalculateBottomIndex(
775  make_multi_index(n_thread_data_on_block));
776 
777  // shuffle: threadwise copy C from VGPR to LDS
778  auto c_thread_copy_vgpr_to_lds =
780  CShuffleDataType,
781  decltype(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2),
782  decltype(c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2),
784  Sequence<CShuffleMXdlPerWavePerShuffle,
785  CShuffleNXdlPerWavePerShuffle,
786  I1,
787  I1,
788  M2,
789  I1,
790  M4,
791  I1>,
793  7,
794  1,
796  1,
797  true>{
798  c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
800  0,
801  m_thread_data_on_block_idx[I1],
802  n_thread_data_on_block_idx[I1],
803  m_thread_data_on_block_idx[I2],
804  m_thread_data_on_block_idx[I3],
805  m_thread_data_on_block_idx[I4],
806  n_thread_data_on_block_idx[I2]),
808 
809  // tuple of references to C/Ds tensor descriptors
810  const auto c_ds_desc_refs = concat_tuple_of_reference(
811  tie(c_shuffle_block_desc_mblock_mperblock_nblock_nperblock),
812  generate_tie(
813  [&](auto i) -> const auto& // return type should be reference
814  { return ds_grid_desc_mblock_mperblock_nblock_nperblock[i]; },
815  Number<NumDTensor>{}));
816 
817  // tuple of references to C/Ds tensor buffers
818  const auto c_ds_buf_refs = concat_tuple_of_reference(
819  tie(c_shuffle_block_buf),
820  generate_tie(
821  [&](auto i) -> const auto& // return type should be reference
822  { return ds_grid_buf[i]; },
823  Number<NumDTensor>{}));
824 
825  // tuple of starting index of C/Ds blockwise copy
826  const auto idx_c_ds_block_begin = container_concat(
827  make_tuple(make_multi_index(0, 0, 0, 0)),
829  [&](auto) {
830  return make_multi_index(block_work_idx[I0], 0, block_work_idx[I1], 0);
831  },
832  Number<NumDTensor>{}));
833 
834  // blockwise copy C/D/E between LDS and global
835  auto cde_block_copy_lds_and_global = ThreadGroupTensorSliceTransfer_v7<
837  decltype(container_concat(make_tuple(CShuffleDataType{}), DsDataType{})),
839  decltype(c_ds_desc_refs),
840  decltype(tie(e_grid_desc_mblock_mperblock_nblock_nperblock)),
841  CDEElementwiseOperation,
842  Sequence<static_cast<index_t>(EGlobalMemoryDataOperation)>, // FIXME: make Sequence
843  // support arbitrary type
844  Sequence<1,
845  CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
846  1,
847  CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>, // BlockSliceLengths,
848  CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
849  Sequence<0, 1, 2, 3>, // typename ThreadClusterArrangeOrder,
850  Sequence<0, 1, 2, 3>, // typename DimAccessOrder,
851  3, // index_t VectorDim,
852  CDEShuffleBlockTransferScalarPerVector_NPerBlock,
856  false>>, // ThreadTransferSrcResetCoordinateAfterRunFlags
857  Sequence<false>> // ThreadTransferDstResetCoordinateAfterRunFlags
858  {c_ds_desc_refs,
859  idx_c_ds_block_begin,
860  tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
861  make_tuple(make_multi_index(block_work_idx[I0], 0, block_work_idx[I1], 0)),
862  cde_element_op};
863 
864  // space filling curve for threadwise C in VGPR before shuffle
865  constexpr auto sfc_c_vgpr =
868  Sequence<CShuffleMXdlPerWavePerShuffle,
869  CShuffleNXdlPerWavePerShuffle,
870  1,
871  1,
872  M2,
873  1,
874  M4,
875  1>>{};
876 
877  // space filling curve for shuffled blockwise C/D/E
878  constexpr auto sfc_cde_block =
881  Sequence<1,
882  CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
883  1,
884  CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>>{};
885 
886  constexpr index_t num_access = sfc_c_vgpr.GetNumOfAccess();
887 
888  static_assert(num_access == sfc_cde_block.GetNumOfAccess(), "wrong!");
889 
890  static_for<0, num_access, 1>{}([&](auto access_id) {
891  // make sure it's safe to write to LDS
892  block_sync_lds();
893 
894  // each thread writes its data from VGPR to LDS
895  c_thread_copy_vgpr_to_lds.Run(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2,
896  sfc_c_vgpr.GetIndexTupleOfNumber(access_id),
897  c_thread_buf,
898  c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
899  c_shuffle_block_buf);
900 
901  // make sure it's safe to read from LDS
902  block_sync_lds();
903 
904  // each block copies its data from LDS to global
905  cde_block_copy_lds_and_global.Run(
906  c_ds_desc_refs,
907  c_ds_buf_refs,
908  tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
909  tie(e_grid_buf));
910 
911  if constexpr(access_id < num_access - 1)
912  {
913  constexpr auto cde_lds_and_global_step =
914  sfc_cde_block.GetForwardStep(access_id);
915 
916  // move on Ds
917  static_for<0, NumDTensor, 1>{}([&](auto i) {
918  cde_block_copy_lds_and_global.MoveSrcSliceWindow(
919  c_ds_desc_refs, i + I1, cde_lds_and_global_step);
920  });
921 
922  // move on E
923  cde_block_copy_lds_and_global.MoveDstSliceWindow(
924  tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
925  I0,
926  cde_lds_and_global_step);
927  }
928  });
929  }
930  }
931 
932  template <bool HasMainKBlockLoop,
933  GemmSpecialization GemmSpec,
934  typename ALayout,
935  typename BLayout,
936  typename DsLayout,
937  typename ELayout,
938  typename Block2ETileMap>
939  __device__ static void Run(const void* __restrict__ p_a_grid_,
940  const void* __restrict__ p_b_grid_,
941  DsGridPointer p_ds_grid,
942  void* __restrict__ p_e_grid_,
943  void* __restrict__ p_shared,
944  const AElementwiseOperation& a_element_op,
945  const BElementwiseOperation& b_element_op,
946  const CDEElementwiseOperation& cde_element_op,
947  const index_t M,
948  const index_t N,
949  const index_t K,
950  const index_t StrideA,
951  const index_t StrideB,
952 #ifdef CK_CODE_GEN_RTC
953  const ck::Array<index_t, NumDTensor> StrideDs,
954 #else
955  const std::array<index_t, NumDTensor> StrideDs,
956 #endif
957  const index_t StrideE,
958  const Block2ETileMap& block_2_etile_map)
959  {
960  const auto p_a_grid = reinterpret_cast<const ADataType*>(p_a_grid_);
961  const auto p_b_grid = reinterpret_cast<const BDataType*>(p_b_grid_);
962  const auto p_e_grid = reinterpret_cast<EDataType*>(p_e_grid_);
963 
964  // tensor descriptors for problem definition
965  const auto a_grid_desc_m_k = MakeAGridDescriptor_M_K<ALayout, GemmSpec>(M, K, StrideA);
966  const auto b_grid_desc_n_k = MakeBGridDescriptor_N_K<BLayout, GemmSpec>(K, N, StrideB);
967 
968  using DsGridDesc_M_N =
970 
971  DsGridDesc_M_N ds_grid_desc_m_n;
972 
973  static_for<0, NumDTensor, 1>{}([&](auto j) {
974  using DLayout = remove_cvref_t<tuple_element_t<j.value, DsLayout>>;
975 
976  ds_grid_desc_m_n(j) = MakeEGridDescriptor_M_N<DLayout, GemmSpec>(M, N, StrideDs[j]);
977  });
978 
979  const auto e_grid_desc_m_n = MakeEGridDescriptor_M_N<ELayout, GemmSpec>(M, N, StrideE);
980 
981  // tensor descriptors for block/thread-wise copy
982  const auto a_grid_desc_ak0_m_ak1 = MakeDefaultAGridDescriptor_AK0_M_AK1(a_grid_desc_m_k);
983 
984  const auto b_grid_desc_bk0_n_bk1 = MakeDefaultBGridDescriptor_BK0_N_BK1(b_grid_desc_n_k);
985 
986  using DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock =
988  DsGridDesc_M_N{}))>;
989 
990  DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock ds_grid_desc_mblock_mperblock_nblock_nperblock;
991 
992  static_for<0, NumDTensor, 1>{}([&](auto j) {
993  ds_grid_desc_mblock_mperblock_nblock_nperblock(j) =
995  });
996 
997  const auto e_grid_desc_mblock_mperblock_nblock_nperblock =
999 
1000  Run<HasMainKBlockLoop>(p_a_grid,
1001  p_b_grid,
1002  p_ds_grid,
1003  p_e_grid,
1004  p_shared,
1005  a_element_op,
1006  b_element_op,
1007  cde_element_op,
1008  a_grid_desc_ak0_m_ak1,
1009  b_grid_desc_bk0_n_bk1,
1010  ds_grid_desc_mblock_mperblock_nblock_nperblock,
1011  e_grid_desc_mblock_mperblock_nblock_nperblock,
1012  block_2_etile_map);
1013  }
1014 
1015  template <bool HasMainKBlockLoop,
1016  typename AGridDesc_MK,
1017  typename BGridDesc_NK,
1018  typename DsGridDesc_MN,
1019  typename EGridDesc_MN,
1020  typename Block2ETileMap>
1021  __device__ static void Run(const void* __restrict__ p_a_grid_,
1022  const void* __restrict__ p_b_grid_,
1023  DsGridPointer p_ds_grid,
1024  void* __restrict__ p_e_grid_,
1025  void* __restrict__ p_shared,
1026  const AElementwiseOperation& a_element_op,
1027  const BElementwiseOperation& b_element_op,
1028  const CDEElementwiseOperation& cde_element_op,
1029  const AGridDesc_MK& a_grid_desc_m_k,
1030  const BGridDesc_NK& b_grid_desc_n_k,
1031  const DsGridDesc_MN& ds_grid_desc_m_n,
1032  const EGridDesc_MN& e_grid_desc_m_n,
1033  const Block2ETileMap& block_2_etile_map)
1034  {
1035  const auto p_a_grid = reinterpret_cast<const ADataType*>(p_a_grid_);
1036  const auto p_b_grid = reinterpret_cast<const BDataType*>(p_b_grid_);
1037  const auto p_e_grid = reinterpret_cast<EDataType*>(p_e_grid_);
1038 
1039  // tensor descriptors for block/thread-wise copy
1040  const auto a_grid_desc_ak0_m_ak1 = MakeDefaultAGridDescriptor_AK0_M_AK1(a_grid_desc_m_k);
1041  const auto b_grid_desc_bk0_n_bk1 = MakeDefaultBGridDescriptor_BK0_N_BK1(b_grid_desc_n_k);
1042 
1043  using DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock =
1045  DsGridDesc_MN{}))>;
1046 
1047  DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock ds_grid_desc_mblock_mperblock_nblock_nperblock;
1048 
1049  static_for<0, NumDTensor, 1>{}([&](auto j) {
1050  ds_grid_desc_mblock_mperblock_nblock_nperblock(j) =
1052  });
1053 
1054  const auto e_grid_desc_mblock_mperblock_nblock_nperblock =
1056 
1057  Run<HasMainKBlockLoop>(p_a_grid,
1058  p_b_grid,
1059  p_ds_grid,
1060  p_e_grid,
1061  p_shared,
1062  a_element_op,
1063  b_element_op,
1064  cde_element_op,
1065  a_grid_desc_ak0_m_ak1,
1066  b_grid_desc_bk0_n_bk1,
1067  ds_grid_desc_mblock_mperblock_nblock_nperblock,
1068  e_grid_desc_mblock_mperblock_nblock_nperblock,
1069  block_2_etile_map);
1070  }
1071 };
1072 
1073 } // namespace ck
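// Host-side usage sketch (not from this header). "GridwiseGemm" is a placeholder
// alias for the struct defined above, and ALayout/BLayout/ELayout/DsLayout/GemmSpec
// plus the problem sizes and stride arrays are assumed to be in scope; a caller
// would typically drive the helpers in roughly this order:
//
//   using GridwiseGemm = /* the gridwise GEMM struct declared in this file */;
//   const auto a_m_k  = GridwiseGemm::MakeAGridDescriptor_M_K<ALayout, GemmSpec>(M, K, StrideA);
//   const auto b_n_k  = GridwiseGemm::MakeBGridDescriptor_N_K<BLayout, GemmSpec>(K, N, StrideB);
//   const auto e_m_n  = GridwiseGemm::MakeEGridDescriptor_M_N<ELayout, GemmSpec>(M, N, StrideE);
//   const auto ds_m_n = GridwiseGemm::MakeDsGridDescriptor_M_N<DsLayout, GemmSpec>(MRaws, NRaws, DsStrides);
//   const auto block_2_etile_map = GridwiseGemm::MakeDefaultBlock2ETileMap(e_m_n);
//   if(GridwiseGemm::CheckValidity(a_m_k, b_n_k, ds_m_n, e_m_n, block_2_etile_map))
//   {
//       // launch a kernel that calls GridwiseGemm::Run<...>, passing
//       // GridwiseGemm::GetSharedMemoryNumberOfByte() bytes of dynamic LDS
//   }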