template <typename ADataType,
          typename AComputeDataType_,
          typename CShuffleDataType,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CDEElementwiseOperation,
          typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
          typename ABlockTransferThreadClusterArrangeOrder,
          typename ABlockTransferSrcAccessOrder,
          index_t ABlockTransferSrcVectorDim,
          index_t ABlockTransferSrcScalarPerVector,
          index_t ABlockTransferDstScalarPerVector_AK1,
          bool AThreadTransferSrcResetCoordinateAfterRun,
          typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
          typename BBlockTransferThreadClusterArrangeOrder,
          typename BBlockTransferSrcAccessOrder,
          index_t BBlockTransferSrcVectorDim,
          index_t BBlockTransferSrcScalarPerVector,
          index_t BBlockTransferDstScalarPerVector_BK1,
          bool BThreadTransferSrcResetCoordinateAfterRun,
          index_t CShuffleMXdlPerWavePerShuffle,
          index_t CShuffleNXdlPerWavePerShuffle,
          typename CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
          index_t CDEShuffleBlockTransferScalarPerVector_NPerBlock,
          typename BComputeDataType_ = AComputeDataType_>
using GridwiseGemmPipe = remove_cvref_t<
    decltype(GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>())>;
#if CK_GFX90A_DENORM_WORKAROUND
__host__ __device__ static constexpr auto
GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock()
{
    constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
    constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

    constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
        make_naive_tensor_descriptor_packed(
            make_tuple(I1,
                       Number<CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl>{},
                       I1,
                       Number<CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>{}));

    return c_shuffle_block_desc_mblock_mperblock_nblock_nperblock;
}
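// Worked example (hypothetical tuning values, not taken from this header): with
// MPerBlock = 256, MXdlPerWave = 4, MPerXdl = 32, MWave = 256 / (4 * 32) = 2, and with
// NPerBlock = 128, NXdlPerWave = 2, NPerXdl = 32, NWave = 128 / (2 * 32) = 2; the
// 256 x 128 block tile is then covered by a 2 x 2 grid of waves, each wave owning
// MXdlPerWave x NXdlPerWave xdlops tiles of MPerXdl x NPerXdl elements.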
// MakeDsGridPointer(): one typed null pointer per D tensor; the tuple type is reused as DsGridPointer
            return static_cast<const DDataType*>(nullptr);
__host__ __device__ static constexpr index_t GetSharedMemoryNumberOfByte()
{
    constexpr auto a_block_desc_ak0_m_ak1 = GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1();
    constexpr auto b_block_desc_bk0_n_bk1 = GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1();

    constexpr auto max_lds_align = math::lcm(AK1, BK1);

    // LDS requirement of the GEMM stage: aligned A and B block tiles
    constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
        a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);
    constexpr auto b_block_space_size_aligned = math::integer_least_multiple(
        b_block_desc_bk0_n_bk1.GetElementSpaceSize(), max_lds_align);

    // LDS requirement of the C-shuffle stage
    constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
        GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock();
    constexpr auto c_block_size =
        c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize();

    // both stages reuse the same p_shared allocation, so the requirement is the larger footprint
    return math::max(/* aligned A + B block tile bytes */, c_block_size * sizeof(CShuffleDataType));
}
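// Example (hypothetical configuration): a 256 x 32 A tile plus a 128 x 32 B tile of fp16
// need (256*32 + 128*32) * 2 B = 24 KiB of LDS during the GEMM stage, while the C-shuffle
// stage needs only its own staging tile; since the two stages run back to back on the same
// p_shared allocation, the kernel reserves the maximum of the two footprints.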
template <typename AGridDesc_M_K>
__host__ __device__ static constexpr auto
MakeDefaultAGridDescriptor_AK0_M_AK1(const AGridDesc_M_K& a_grid_desc_m_k)
{
    const auto M = a_grid_desc_m_k.GetLength(I0);
    const auto K = a_grid_desc_m_k.GetLength(I1);

    const auto AK0 = K / AK1;

    // split K into (AK0, AK1) and keep M: (M, K) -> (AK0, M, AK1)
    return transform_tensor_descriptor(
        a_grid_desc_m_k,
        make_tuple(make_unmerge_transform(make_tuple(AK0, AK1)),
                   make_pass_through_transform(M)),
        make_tuple(Sequence<1>{}, Sequence<0>{}),
        make_tuple(Sequence<0, 2>{}, Sequence<1>{}));
}
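// Example (hypothetical sizes): with K = 512 and AK1 = 8, AK0 = 512 / 8 = 64, so the A
// tile is addressed as (AK0, M, AK1) = (64, M, 8); AK1 is the innermost K chunk kept
// contiguous for vectorized access by the blockwise copy.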
template <typename BGridDesc_N_K>
__host__ __device__ static constexpr auto
MakeDefaultBGridDescriptor_BK0_N_BK1(const BGridDesc_N_K& b_grid_desc_n_k)
{
    const auto N = b_grid_desc_n_k.GetLength(I0);
    const auto K = b_grid_desc_n_k.GetLength(I1);

    const auto BK0 = K / BK1;

    // split K into (BK0, BK1) and keep N: (N, K) -> (BK0, N, BK1)
    return transform_tensor_descriptor(
        b_grid_desc_n_k,
        make_tuple(make_unmerge_transform(make_tuple(BK0, BK1)),
                   make_pass_through_transform(N)),
        make_tuple(Sequence<1>{}, Sequence<0>{}),
        make_tuple(Sequence<0, 2>{}, Sequence<1>{}));
}
template <typename EGridDesc_M_N>
__host__ __device__ static constexpr auto
MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const EGridDesc_M_N& e_grid_desc_m_n)
{
    const auto M = e_grid_desc_m_n.GetLength(I0);
    const auto N = e_grid_desc_m_n.GetLength(I1);

    const auto MBlock = M / MPerBlock;
    const auto NBlock = N / NPerBlock;

    // (M, N) -> (MBlock, MPerBlock, NBlock, NPerBlock)
    const auto e_grid_desc_mblock_mperblock_nblock_nperblock = transform_tensor_descriptor(
        e_grid_desc_m_n,
        make_tuple(make_unmerge_transform(make_tuple(MBlock, Number<MPerBlock>{})),
                   make_unmerge_transform(make_tuple(NBlock, Number<NPerBlock>{}))),
        make_tuple(Sequence<0>{}, Sequence<1>{}),
        make_tuple(Sequence<0, 1>{}, Sequence<2, 3>{}));

    return e_grid_desc_mblock_mperblock_nblock_nperblock;
}
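// Example (hypothetical problem size): M = 4096, N = 4096 with MPerBlock = 256 and
// NPerBlock = 128 gives MBlock = 16 and NBlock = 32, i.e. 512 output tiles, each produced
// by one workgroup selected through the Block2ETileMap.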
template <typename DsGridDesc_M_N>
__host__ __device__ static constexpr auto
MakeDsGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const DsGridDesc_M_N& ds_grid_desc_m_n)
{
    // apply the same (MBlock, MPerBlock, NBlock, NPerBlock) split to every D tensor
    return generate_tuple(
        [&](auto i) {
            return MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n[i]);
        },
        Number<NumDTensor>{});
}
template <typename EGridDesc_M_N>
__host__ __device__ static constexpr auto
MakeDefaultBlock2ETileMap(const EGridDesc_M_N& e_grid_desc_m_n)
{
    // default block-id -> (m-tile, n-tile) mapping over the E output grid
    return BlockToCTileMap_M00_N0_M01Adapt<MPerBlock, NPerBlock, EGridDesc_M_N>(
        e_grid_desc_m_n);
}
template <typename ALayout, typename BLayout, typename ELayout>
__host__ __device__ static bool CheckTensorTransfersValidity(index_t MRaw, index_t NRaw, index_t KRaw)
{
    const auto A_vector_dim_size = ABlockTransferSrcVectorDim == 2 ? KRaw : MRaw;
    const auto B_vector_dim_size = BBlockTransferSrcVectorDim == 2 ? KRaw : NRaw;
    const auto E_vector_dim_size = NRaw;

    // A: the vectorized dimension must be the contiguous one for the given layout and be
    // divisible by the per-thread vector width
    if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
    {
        if(!(A_vector_dim_size == KRaw &&
             A_vector_dim_size % ABlockTransferSrcScalarPerVector == 0))
            return false;
    }
    else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
    {
        if(!(A_vector_dim_size == MRaw &&
             A_vector_dim_size % ABlockTransferSrcScalarPerVector == 0))
            return false;
    }

    // B: same constraint, with N taking the role of M
    if constexpr(is_same_v<tensor_layout::gemm::RowMajor, BLayout>)
    {
        if(!(B_vector_dim_size == NRaw &&
             B_vector_dim_size % BBlockTransferSrcScalarPerVector == 0))
            return false;
    }
    else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, BLayout>)
    {
        if(!(B_vector_dim_size == KRaw &&
             B_vector_dim_size % BBlockTransferSrcScalarPerVector == 0))
            return false;
    }

    // E: stores are vectorized along N; a column-major E therefore requires a
    // scalar-per-vector of 1
    if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ELayout>)
    {
        if(!(E_vector_dim_size == NRaw &&
             E_vector_dim_size % CDEShuffleBlockTransferScalarPerVector_NPerBlock == 0))
            return false;
    }
    else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ELayout>)
    {
        if(!(E_vector_dim_size == NRaw &&
             CDEShuffleBlockTransferScalarPerVector_NPerBlock == 1))
            return false;
    }

    return true;
}
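// Example (hypothetical): for a row-major A with ABlockTransferSrcVectorDim = 2 and
// ABlockTransferSrcScalarPerVector = 8, the A check above reduces to KRaw % 8 == 0,
// because each thread issues 8-wide vector loads along the contiguous K dimension.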
template <typename AGridDesc_M_K,
          typename BGridDesc_N_K,
          typename DsGridDesc_M_N,
          typename EGridDesc_M_N,
          typename Block2ETileMap>
__host__ __device__ static constexpr bool
CheckValidity(const AGridDesc_M_K& a_grid_desc_m_k,
              const BGridDesc_N_K& b_grid_desc_n_k,
              const DsGridDesc_M_N& ds_grid_desc_m_n,
              const EGridDesc_M_N& e_grid_desc_m_n,
              [[maybe_unused]] const Block2ETileMap&)
{
    static_assert((MPerBlock % (MPerXdl * MXdlPerWave) == 0) &&
                      (NPerBlock % (NXdlPerWave * NPerXdl)) == 0,
                  "Invalid tuning param!");
    static_assert(KPerBlock % AK1Value == 0 && KPerBlock % BK1Value == 0,
                  "KPerBlock must be divisible by AK1Value and BK1Value!");

    const auto M  = a_grid_desc_m_k.GetLength(I0);
    const auto N  = b_grid_desc_n_k.GetLength(I0);
    const auto AK = a_grid_desc_m_k.GetLength(I1);
    const auto BK = b_grid_desc_n_k.GetLength(I1);

    // A, B and E must agree on the (padded) problem size
    if(!(M == e_grid_desc_m_n.GetLength(I0) && N == e_grid_desc_m_n.GetLength(I1) && AK == BK))
    {
        return false;
    }

    // every D tensor must match the E extents
    bool valid = true;
    static_for<0, NumDTensor, 1>{}([&](auto i) {
        valid = valid && (M == ds_grid_desc_m_n[i].GetLength(I0) &&
                          N == ds_grid_desc_m_n[i].GetLength(I1));
    });

    if(!valid)
    {
        return false;
    }

    // the (padded) problem must tile exactly
    if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && AK % KPerBlock == 0))
    {
        return false;
    }

    // the selected gridwise pipeline must support this number of K loops
    const auto num_k_loop = AK / KPerBlock;
    if(!GridwiseGemmPipe::IsSupported(num_k_loop))
    {
        return false;
    }

    // each tensor must stay below 2 GB so 32-bit offsets do not overflow
    constexpr long_index_t TwoGB = (long_index_t{1} << 31);

    if(!(a_grid_desc_m_k.GetElementSpaceSize() * sizeof(ADataType) <= TwoGB &&
         b_grid_desc_n_k.GetElementSpaceSize() * sizeof(BDataType) <= TwoGB &&
         e_grid_desc_m_n.GetElementSpaceSize() * sizeof(EDataType) <= TwoGB))
    {
        return false;
    }

    return true;
}

__host__ __device__ static constexpr bool CalculateHasMainKBlockLoop(index_t K)
{
    const index_t num_loop = K / KPerBlock;

    return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
}
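// Usage sketch (illustrative host-side check; the descriptors are assumed to have been
// built with the Make*GridDescriptor_* helpers above, and "GridwiseGemm" stands for this
// struct):
//
//   if(!GridwiseGemm::CheckValidity(a_grid_desc_m_k, b_grid_desc_n_k,
//                                   ds_grid_desc_m_n, e_grid_desc_m_n, block_2_etile_map))
//   {
//       return false; // this tuning configuration cannot handle the given problem size
//   }
//
//   const bool has_main_k_loop = GridwiseGemm::CalculateHasMainKBlockLoop(K);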
template <typename ALayout, GemmSpecialization GemmSpec>
__host__ __device__ static auto MakeAGridDescriptor_M_K(index_t MRaw, index_t KRaw, index_t StrideA)
{
    constexpr auto matrix_padder =
        ck::tensor_operation::device::MatrixPadder<GemmSpec, index_t, index_t, index_t>{
            MPerBlock, NPerBlock, KPerBlock};

    const auto a_grid_desc_mraw_kraw = [&]() {
        // row-major A: K is contiguous; column-major A: M is contiguous
        if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
            return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), make_tuple(StrideA, I1));
        else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
            return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw), make_tuple(I1, StrideA));
    }();

    // pad M and K up to multiples of the block tile according to GemmSpec
    return matrix_padder.PadADescriptor_M_K(a_grid_desc_mraw_kraw);
}
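// Example (hypothetical): a row-major 1000 x 1000 A with StrideA = 1000 first becomes a
// naive (MRaw, KRaw) descriptor with strides (1000, 1); with MPerBlock = 256, KPerBlock = 32
// and a padding GemmSpec, PadADescriptor_M_K then extends it to 1024 x 1024 so the block
// tiles divide the problem exactly.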
template <typename BLayout, GemmSpecialization GemmSpec>
__host__ __device__ static auto MakeBGridDescriptor_N_K(index_t KRaw, index_t NRaw, index_t StrideB)
{
    constexpr auto matrix_padder =
        ck::tensor_operation::device::MatrixPadder<GemmSpec, index_t, index_t, index_t>{
            MPerBlock, NPerBlock, KPerBlock};

    const auto b_grid_desc_nraw_kraw = [&]() {
        // row-major B (K x N): N is contiguous; column-major B: K is contiguous
        if constexpr(is_same_v<tensor_layout::gemm::RowMajor, BLayout>)
            return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), make_tuple(I1, StrideB));
        else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, BLayout>)
            return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw), make_tuple(StrideB, I1));
    }();

    // pad N and K up to multiples of the block tile according to GemmSpec
    return matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw);
}
template <typename ELayout, GemmSpecialization GemmSpec>
__host__ __device__ static auto MakeEGridDescriptor_M_N(index_t MRaw, index_t NRaw, index_t StrideE)
{
    constexpr auto matrix_padder =
        ck::tensor_operation::device::MatrixPadder<GemmSpec, index_t, index_t, index_t>{
            MPerBlock, NPerBlock, KPerBlock};

    const auto e_grid_desc_mraw_nraw = [&]() {
        // row-major E (M x N): N is contiguous; column-major E: M is contiguous
        if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ELayout>)
            return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), make_tuple(StrideE, I1));
        else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ELayout>)
            return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw), make_tuple(I1, StrideE));
    }();

    // pad M and N up to multiples of the block tile according to GemmSpec
    return matrix_padder.PadCDescriptor_M_N(e_grid_desc_mraw_nraw);
}
#ifdef CK_CODE_GEN_RTC
template <typename DsLayout, GemmSpecialization GemmSpec>
__host__ __device__ static auto MakeDsGridDescriptor_M_N(/* ... */)
#else
template <typename DsLayout, GemmSpecialization GemmSpec>
__host__ __device__ static auto
MakeDsGridDescriptor_M_N(const std::array<index_t, NumDTensor>& MRaws,
                         const std::array<index_t, NumDTensor>& NRaws,
                         const std::array<index_t, NumDTensor>& DsStride)
#endif
{
    // one padded M_N descriptor per D tensor, each with its own layout and stride
    return generate_tuple(
        [&](auto i) {
            using DLayout = remove_cvref_t<tuple_element_t<i.value, DsLayout>>;
            return MakeEGridDescriptor_M_N<DLayout, GemmSpec>(MRaws[i], NRaws[i], DsStride[i]);
        },
        Number<NumDTensor>{});
}
__host__ __device__ static constexpr auto GetMPerBlock() { return MPerBlock; }
template <bool HasMainKBlockLoop,
          typename AGridDesc_AK0_M_AK1,
          typename BGridDesc_BK0_N_BK1,
          typename DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
          typename EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
          typename Block2ETileMap>
__device__ static void
Run(const ADataType* __restrict__ p_a_grid,
    const BDataType* __restrict__ p_b_grid,
    DsGridPointer p_ds_grid,
    EDataType* __restrict__ p_e_grid,
    void* __restrict__ p_shared,
    const AElementwiseOperation& a_element_op,
    const BElementwiseOperation& b_element_op,
    const CDEElementwiseOperation& cde_element_op,
    const AGridDesc_AK0_M_AK1& a_grid_desc_ak0_m_ak1,
    const BGridDesc_BK0_N_BK1& b_grid_desc_bk0_n_bk1,
    const DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock& ds_grid_desc_mblock_mperblock_nblock_nperblock,
    const EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock& e_grid_desc_mblock_mperblock_nblock_nperblock,
    const Block2ETileMap& block_2_etile_map)
{
    const auto a_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
        p_a_grid, a_grid_desc_ak0_m_ak1.GetElementSpaceSize());

    const auto b_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
        p_b_grid, b_grid_desc_bk0_n_bk1.GetElementSpaceSize());

    const auto ds_grid_buf = generate_tuple(
        [&](auto i) {
            return make_dynamic_buffer<AddressSpaceEnum::Global>(
                p_ds_grid[i],
                ds_grid_desc_mblock_mperblock_nblock_nperblock[i].GetElementSpaceSize());
        },
        Number<NumDTensor>{});

    auto e_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
        p_e_grid, e_grid_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
    // map this workgroup to its (m-tile, n-tile) of E
    const auto block_work_idx =
        block_2_etile_map.CalculateBottomIndex(make_multi_index(get_block_1d_id()));

    if(!block_2_etile_map.ValidCTileIndex(
           block_work_idx,
           make_tuple(e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0),
                      e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2))))
    {
        return;
    }

    // broadcast the tile origin through readfirstlane so it is kept in SGPRs
    const index_t m_block_data_idx_on_grid =
        __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);

    const index_t n_block_data_idx_on_grid =
        __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);
    // A matrix blockwise copy: the thread group cooperatively loads one
    // (AK0, MPerBlock, AK1) tile of A from global memory into LDS
    auto a_blockwise_copy =
        ThreadGroupTensorSliceTransfer_v4r1<ThisThreadBlock,
                                            AElementwiseOperation, /* ... */
                                            ABlockTransferThreadClusterLengths_AK0_M_AK1,
                                            ABlockTransferThreadClusterArrangeOrder, /* ... */
                                            decltype(a_grid_desc_ak0_m_ak1),
                                            decltype(a_block_desc_ak0_m_ak1),
                                            ABlockTransferSrcAccessOrder, /* ... */
                                            ABlockTransferSrcVectorDim, /* ... */
                                            ABlockTransferSrcScalarPerVector,
                                            ABlockTransferDstScalarPerVector_AK1, /* ... */
                                            AThreadTransferSrcResetCoordinateAfterRun, /* ... */
                                            NumGemmKPrefetchStage>(
            a_grid_desc_ak0_m_ak1,
            make_multi_index(0, m_block_data_idx_on_grid, 0),
            a_element_op,
            a_block_desc_ak0_m_ak1,
            make_multi_index(0, 0, 0),
            ck::tensor_operation::element_wise::PassThrough{});

    // B matrix blockwise copy: same structure for the (BK0, NPerBlock, BK1) tile of B
    auto b_blockwise_copy =
        ThreadGroupTensorSliceTransfer_v4r1<ThisThreadBlock,
                                            BElementwiseOperation, /* ... */
                                            BBlockTransferThreadClusterLengths_BK0_N_BK1,
                                            BBlockTransferThreadClusterArrangeOrder, /* ... */
                                            decltype(b_grid_desc_bk0_n_bk1),
                                            decltype(b_block_desc_bk0_n_bk1),
                                            BBlockTransferSrcAccessOrder, /* ... */
                                            BBlockTransferSrcVectorDim, /* ... */
                                            BBlockTransferSrcScalarPerVector,
                                            BBlockTransferDstScalarPerVector_BK1, /* ... */
                                            BThreadTransferSrcResetCoordinateAfterRun, /* ... */
                                            NumGemmKPrefetchStage>(
            b_grid_desc_bk0_n_bk1,
            make_multi_index(0, n_block_data_idx_on_grid, 0),
            b_element_op,
            b_block_desc_bk0_n_bk1,
            make_multi_index(0, 0, 0),
            ck::tensor_operation::element_wise::PassThrough{});

    // blockwise GEMM on the xdlops (MFMA) path; each thread accumulates its part of the
    // C block tile in VGPRs
    auto blockwise_gemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector<
        /* ... */
        decltype(a_block_desc_ak0_m_ak1),
        decltype(b_block_desc_bk0_n_bk1),
        /* ... */>();

    auto c_thread_buf = blockwise_gemm.GetCThreadBuffer();
    // LDS allocation for the A and B tiles; B is placed after the aligned A tile
    constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
        a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);

    auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
        static_cast<AComputeDataType*>(p_shared), a_block_desc_ak0_m_ak1.GetElementSpaceSize());

    auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
        /* ... B tile pointer: p_shared offset by the aligned A tile ... */
        b_block_desc_bk0_n_bk1.GetElementSpaceSize());
    // main K loop: stream A/B tiles through LDS while the blockwise GEMM consumes them
    const auto gridwise_gemm_pipeline =
        GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>();

    const index_t num_k_block_main_loop = __builtin_amdgcn_readfirstlane(
        (a_grid_desc_ak0_m_ak1.GetLength(I0) * a_grid_desc_ak0_m_ak1.GetLength(I2)) /
        KPerBlock);

    gridwise_gemm_pipeline.template Run<HasMainKBlockLoop>(a_grid_desc_ak0_m_ak1,
                                                           a_block_desc_ak0_m_ak1,
                                                           a_blockwise_copy,
                                                           a_grid_buf,
                                                           a_block_buf,
                                                           a_block_slice_copy_step,
                                                           b_grid_desc_bk0_n_bk1,
                                                           b_block_desc_bk0_n_bk1,
                                                           b_blockwise_copy,
                                                           b_grid_buf,
                                                           b_block_buf,
                                                           b_block_slice_copy_step,
                                                           blockwise_gemm,
                                                           c_thread_buf,
                                                           num_k_block_main_loop);
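    // Epilogue: the accumulator tile held in VGPRs is staged through LDS a few xdlops
    // sub-tiles at a time (CShuffleMXdlPerWavePerShuffle x CShuffleNXdlPerWavePerShuffle),
    // re-read in a layout that allows coalesced, vectorized global stores of E, and fused
    // with the D tensors through cde_element_op on the way out.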
    static_assert(MXdlPerWave % CShuffleMXdlPerWavePerShuffle == 0 &&
                      NXdlPerWave % CShuffleNXdlPerWavePerShuffle == 0,
                  "wrong!");

    constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
    constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

    // per-thread and per-block views of the accumulator produced by the blockwise GEMM
    constexpr auto c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2 =
        blockwise_gemm.GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

    constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp =
        blockwise_gemm.GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

    constexpr auto M0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I0);
    constexpr auto N0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I1);
    constexpr auto M1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I2);
    constexpr auto N1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I3);
    constexpr auto M2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I4);
    constexpr auto M3 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I5);
    constexpr auto M4 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I6);
    constexpr auto N2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I7);
    // LDS tile that stages the shuffled C fragments
    constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
        GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock();

    auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
        static_cast<CShuffleDataType*>(p_shared),
        c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());

    // the same LDS tile viewed in the (M0, N0, M1, N1, M2, M3, M4, N2) layout produced
    // by the blockwise GEMM
    constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2 = transform_tensor_descriptor(
        c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
        /* ... */);
    // where this thread's accumulator fragment sits inside the block tile
    const auto c_thread_mtx_on_block =
        blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0, I0, I0);

    const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
    const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];

    // adaptors mapping the flat m/n offsets back to the split xdlops output coordinates
    const auto m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor =
        make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(M0, M1, M2, M3, M4))),
            make_tuple(Sequence<0, 1, 2, 3, 4>{}),
            make_tuple(Sequence<0>{}));

    const auto m_thread_data_on_block_idx =
        m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor.CalculateBottomIndex(
            make_multi_index(m_thread_data_on_block));

    const auto n_thread_data_on_block_to_n0_n1_n2_adaptor =
        make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(N0, N1, N2))),
            make_tuple(Sequence<0, 1, 2>{}),
            make_tuple(Sequence<0>{}));

    const auto n_thread_data_on_block_idx =
        n_thread_data_on_block_to_n0_n1_n2_adaptor.CalculateBottomIndex(
            make_multi_index(n_thread_data_on_block));
    // per-thread copy of the accumulator fragment from VGPRs into the LDS shuffle tile
    auto c_thread_copy_vgpr_to_lds =
        ThreadwiseTensorSliceTransfer_v1r3<AccDataType,
                                           CShuffleDataType,
                                           decltype(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                                           decltype(c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                                           /* ... */
                                           Sequence<CShuffleMXdlPerWavePerShuffle,
                                                    CShuffleNXdlPerWavePerShuffle,
                                                    /* ... */>,
                                           /* ... */>{
            c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
            make_multi_index(0,
                             0,
                             m_thread_data_on_block_idx[I1],
                             n_thread_data_on_block_idx[I1],
                             m_thread_data_on_block_idx[I2],
                             m_thread_data_on_block_idx[I3],
                             m_thread_data_on_block_idx[I4],
                             n_thread_data_on_block_idx[I2]),
            ck::tensor_operation::element_wise::PassThrough{}};
    // tuples of references to the C (LDS) descriptor/buffer followed by the D (global)
    // descriptors/buffers, so a single transfer object can read them all
    const auto c_ds_desc_refs = concat_tuple_of_reference(
        tie(c_shuffle_block_desc_mblock_mperblock_nblock_nperblock),
        generate_tie(
            [&](auto i) -> const auto& { return ds_grid_desc_mblock_mperblock_nblock_nperblock[i]; },
            Number<NumDTensor>{}));

    const auto c_ds_buf_refs = concat_tuple_of_reference(
        tie(c_shuffle_block_buf),
        generate_tie(
            [&](auto i) -> const auto& { return ds_grid_buf[i]; },
            Number<NumDTensor>{}));
    // fused epilogue transfer: reads the shuffled C tile from LDS and the D tiles from
    // global memory, applies cde_element_op, and stores the E tile to global memory
    auto cde_block_copy_lds_and_global = ThreadGroupTensorSliceTransfer_v7<
        ThisThreadBlock, /* ... */
        decltype(c_ds_desc_refs),
        decltype(tie(e_grid_desc_mblock_mperblock_nblock_nperblock)),
        CDEElementwiseOperation, /* ... */
        Sequence<1,
                 CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
                 1,
                 CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>,
        CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock, /* ... */
        CDEShuffleBlockTransferScalarPerVector_NPerBlock, /* ... */>
        {c_ds_desc_refs,
         idx_c_ds_block_begin,
         tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
         idx_e_block_begin,
         cde_element_op};
    // space-filling curves enumerating the shuffle sub-tiles, once over the VGPR
    // accumulator layout and once over the (MBlock, MPerBlock, NBlock, NPerBlock) layout
    constexpr auto sfc_c_vgpr =
        SpaceFillingCurve</* ... */
                          Sequence<CShuffleMXdlPerWavePerShuffle,
                                   CShuffleNXdlPerWavePerShuffle,
                                   /* ... */>>{};

    constexpr auto sfc_cde_block =
        SpaceFillingCurve</* ... */
                          Sequence<1,
                                   CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
                                   1,
                                   CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>>{};

    constexpr index_t num_access = sfc_c_vgpr.GetNumOfAccess();

    static_assert(num_access == sfc_cde_block.GetNumOfAccess(), "wrong!");

    static_for<0, num_access, 1>{}([&](auto access_id) {
        // make sure it is safe to write to LDS
        block_sync_lds();

        // each thread writes one shuffle slice of its accumulator from VGPR to LDS
        c_thread_copy_vgpr_to_lds.Run(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                      sfc_c_vgpr.GetIndexTupleOfNumber(access_id),
                                      c_thread_buf,
                                      c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                      c_shuffle_block_buf);

        // make sure it is safe to read from LDS
        block_sync_lds();

        // the block reads the slice back from LDS together with the D tiles and writes
        // the fused result to E in global memory
        cde_block_copy_lds_and_global.Run(c_ds_desc_refs,
                                          c_ds_buf_refs,
                                          tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
                                          tie(e_grid_buf));

        if constexpr(access_id < num_access - 1)
        {
            constexpr auto cde_lds_and_global_step = sfc_cde_block.GetForwardStep(access_id);

            // advance the read window over each D tensor ...
            static_for<0, NumDTensor, 1>{}([&](auto i) {
                cde_block_copy_lds_and_global.MoveSrcSliceWindow(
                    c_ds_desc_refs, i + I1, cde_lds_and_global_step);
            });

            // ... and the write window over E
            cde_block_copy_lds_and_global.MoveDstSliceWindow(
                tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
                I0,
                cde_lds_and_global_step);
        }
    });
}
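// Note on launch geometry (not spelled out in this excerpt): one workgroup produces one
// MPerBlock x NPerBlock tile of E, so a kernel wrapping this Run is typically launched
// with one workgroup per output tile, i.e. roughly (M / MPerBlock) * (N / NPerBlock)
// workgroups for the default block-to-tile map.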
template <bool HasMainKBlockLoop,
          GemmSpecialization GemmSpec,
          typename ALayout,
          typename BLayout,
          typename DsLayout,
          typename ELayout,
          typename Block2ETileMap>
__device__ static void
Run(const void* __restrict__ p_a_grid_,
    const void* __restrict__ p_b_grid_,
    DsGridPointer p_ds_grid,
    void* __restrict__ p_e_grid_,
    void* __restrict__ p_shared,
    const AElementwiseOperation& a_element_op,
    const BElementwiseOperation& b_element_op,
    const CDEElementwiseOperation& cde_element_op,
    const index_t M,
    const index_t N,
    const index_t K,
    const index_t StrideA,
    const index_t StrideB,
#ifdef CK_CODE_GEN_RTC
    /* ... */
#else
    const std::array<index_t, NumDTensor> StrideDs,
#endif
    const index_t StrideE,
    const Block2ETileMap& block_2_etile_map)
{
    const auto p_a_grid = reinterpret_cast<const ADataType*>(p_a_grid_);
    const auto p_b_grid = reinterpret_cast<const BDataType*>(p_b_grid_);
    const auto p_e_grid = reinterpret_cast<EDataType*>(p_e_grid_);

    // descriptors describing the (padded) problem
    const auto a_grid_desc_m_k = MakeAGridDescriptor_M_K<ALayout, GemmSpec>(M, K, StrideA);
    const auto b_grid_desc_n_k = MakeBGridDescriptor_N_K<BLayout, GemmSpec>(K, N, StrideB);

    using DsGridDesc_M_N = /* ... */;

    DsGridDesc_M_N ds_grid_desc_m_n;

    static_for<0, NumDTensor, 1>{}([&](auto j) {
        using DLayout = remove_cvref_t<tuple_element_t<j.value, DsLayout>>;
        ds_grid_desc_m_n(j) = MakeEGridDescriptor_M_N<DLayout, GemmSpec>(M, N, StrideDs[j]);
    });

    const auto e_grid_desc_m_n = MakeEGridDescriptor_M_N<ELayout, GemmSpec>(M, N, StrideE);

    // descriptors in the block-tiled form consumed by the kernel body
    const auto a_grid_desc_ak0_m_ak1 = MakeDefaultAGridDescriptor_AK0_M_AK1(a_grid_desc_m_k);
    const auto b_grid_desc_bk0_n_bk1 = MakeDefaultBGridDescriptor_BK0_N_BK1(b_grid_desc_n_k);

    using DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock = /* ... */;

    DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock ds_grid_desc_mblock_mperblock_nblock_nperblock;

    static_for<0, NumDTensor, 1>{}([&](auto j) {
        ds_grid_desc_mblock_mperblock_nblock_nperblock(j) =
            MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n[j]);
    });

    const auto e_grid_desc_mblock_mperblock_nblock_nperblock =
        MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(e_grid_desc_m_n);

    Run<HasMainKBlockLoop>(p_a_grid,
                           p_b_grid,
                           p_ds_grid,
                           p_e_grid,
                           p_shared,
                           a_element_op,
                           b_element_op,
                           cde_element_op,
                           a_grid_desc_ak0_m_ak1,
                           b_grid_desc_bk0_n_bk1,
                           ds_grid_desc_mblock_mperblock_nblock_nperblock,
                           e_grid_desc_mblock_mperblock_nblock_nperblock,
                           block_2_etile_map);
}
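// Note: this overload rebuilds every grid descriptor from the raw sizes and strides on
// each call and then forwards to the descriptor-based Run above, so a caller only needs
// M, N, K, the strides, and the block-to-tile map.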
template <bool HasMainKBlockLoop,
          typename AGridDesc_MK,
          typename BGridDesc_NK,
          typename DsGridDesc_MN,
          typename EGridDesc_MN,
          typename Block2ETileMap>
__device__ static void
Run(const void* __restrict__ p_a_grid_,
    const void* __restrict__ p_b_grid_,
    DsGridPointer p_ds_grid,
    void* __restrict__ p_e_grid_,
    void* __restrict__ p_shared,
    const AElementwiseOperation& a_element_op,
    const BElementwiseOperation& b_element_op,
    const CDEElementwiseOperation& cde_element_op,
    const AGridDesc_MK& a_grid_desc_m_k,
    const BGridDesc_NK& b_grid_desc_n_k,
    const DsGridDesc_MN& ds_grid_desc_m_n,
    const EGridDesc_MN& e_grid_desc_m_n,
    const Block2ETileMap& block_2_etile_map)
{
    const auto p_a_grid = reinterpret_cast<const ADataType*>(p_a_grid_);
    const auto p_b_grid = reinterpret_cast<const BDataType*>(p_b_grid_);
    const auto p_e_grid = reinterpret_cast<EDataType*>(p_e_grid_);

    // block-tiled descriptors derived from the M_K / N_K / M_N views
    const auto a_grid_desc_ak0_m_ak1 = MakeDefaultAGridDescriptor_AK0_M_AK1(a_grid_desc_m_k);
    const auto b_grid_desc_bk0_n_bk1 = MakeDefaultBGridDescriptor_BK0_N_BK1(b_grid_desc_n_k);

    using DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock = /* ... */;

    DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock ds_grid_desc_mblock_mperblock_nblock_nperblock;

    static_for<0, NumDTensor, 1>{}([&](auto j) {
        ds_grid_desc_mblock_mperblock_nblock_nperblock(j) =
            MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n[j]);
    });

    const auto e_grid_desc_mblock_mperblock_nblock_nperblock =
        MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(e_grid_desc_m_n);

    Run<HasMainKBlockLoop>(p_a_grid,
                           p_b_grid,
                           p_ds_grid,
                           p_e_grid,
                           p_shared,
                           a_element_op,
                           b_element_op,
                           cde_element_op,
                           a_grid_desc_ak0_m_ak1,
                           b_grid_desc_bk0_n_bk1,
                           ds_grid_desc_mblock_mperblock_nblock_nperblock,
                           e_grid_desc_mblock_mperblock_nblock_nperblock,
                           block_2_etile_map);
}