  20 template <typename GridwiseGemm,
  26           typename CGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock,
  27           typename AElementwiseOperation,
  28           typename BElementwiseOperation,
  29           typename CElementwiseOperation,
  30           typename Block2CTileMap,
  31           bool HasMainKBlockLoop>
  33 #if CK_USE_LAUNCH_BOUNDS
  37                      const BDataType* __restrict__ p_b_grid,
  38                      CDataType* __restrict__ p_c_grid,
  39                      const AGridDesc a_grid_desc,
  40                      const BGridDesc b_grid_desc,
  41                      const CGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock
  42                          c_grid_desc_mblock_mperblock_nblock_nperblock,
  43                      const AElementwiseOperation a_element_op,
  44                      const BElementwiseOperation b_element_op,
  45                      const CElementwiseOperation c_element_op,
  46                      const Block2CTileMap block_2_ctile_map)
  48 #if(defined(__gfx11__) || defined(__gfx12__))
  49     __shared__ char p_shared[GridwiseGemm::SharedMemTrait::lds_size];
  51     GridwiseGemm::template Run<HasMainKBlockLoop>(p_a_grid,
  57                                                   c_grid_desc_mblock_mperblock_nblock_nperblock,
  68     ignore = c_grid_desc_mblock_mperblock_nblock_nperblock;
  72     ignore = block_2_ctile_map;
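On architectures with WMMA support (gfx11/gfx12) the wrapper above allocates the LDS workspace and forwards every argument to GridwiseGemm::Run; on any other target it only assigns each argument to ignore so the kernel compiles to a no-op. A minimal sketch of that guard pattern, assuming a gridwise op that exposes the same SharedMemTrait::lds_size and a compatible Run; the kernel_wrapper name and the trimmed argument list are illustrative, not part of this header:

    // Hypothetical wrapper showing the arch-guard + LDS-allocation pattern.
    template <typename GridwiseOp, typename ADataType, typename BDataType, typename CDataType>
    __global__ void kernel_wrapper(const ADataType* __restrict__ p_a_grid,
                                   const BDataType* __restrict__ p_b_grid,
                                   CDataType* __restrict__ p_c_grid)
    {
    #if(defined(__gfx11__) || defined(__gfx12__))
        // LDS workspace sized by the gridwise op, handed down as a raw pointer.
        __shared__ char p_shared[GridwiseOp::SharedMemTrait::lds_size];
        GridwiseOp::Run(p_a_grid, p_b_grid, p_c_grid, p_shared);
    #else
        // Unsupported target: reference the arguments so the build stays warning-free
        // (the real kernel assigns them to ck's ignore object instead).
        (void)p_a_grid;
        (void)p_b_grid;
        (void)p_c_grid;
    #endif
    }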
 
  80           typename CShuffleDataType,
  85           typename CGridDesc_M_N,
  86           typename AElementwiseOperation,
  87           typename BElementwiseOperation,
  88           typename CElementwiseOperation,
  97           typename ABlockTransferThreadClusterLengths_K0_M_K1,
  98           typename ABlockTransferThreadClusterArrangeOrder,
  99           typename ABlockTransferSrcAccessOrder,
 100           index_t ABlockTransferSrcVectorDim,
 101           index_t ABlockTransferSrcScalarPerVector,
 102           index_t ABlockTransferDstScalarPerVector_K1,
 103           bool AThreadTransferSrcResetCoordinateAfterRun,
 105           bool ABlockLdsExtraM,
 106           typename BBlockTransferThreadClusterLengths_K0_N_K1,
 107           typename BBlockTransferThreadClusterArrangeOrder,
 108           typename BBlockTransferSrcAccessOrder,
 109           index_t BBlockTransferSrcVectorDim,
 110           index_t BBlockTransferSrcScalarPerVector,
 111           index_t BBlockTransferDstScalarPerVector_K1,
 112           bool BThreadTransferSrcResetCoordinateAfterRun,
 114           bool BBlockLdsExtraN,
 115           index_t CShuffleMRepeatPerShuffle,
 116           index_t CShuffleNRepeatPerShuffle,
 117           typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
 118           index_t CShuffleBlockTransferScalarPerVector_NPerBlock,
 119           index_t NumGemmKPrefetchStage = 1,
 
 136     static constexpr auto MWaves = MPerBlock / (MRepeat * MPerWmma);
 137     static constexpr auto NWaves = NPerBlock / (NRepeat * NPerWmma);
 138     static constexpr auto WmmaK  = K1 == 16 ? 32 : 16;
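The three constants above are pure integer arithmetic on the tuning parameters: the block tile is covered by MWaves x NWaves wavefronts of MRepeat x NRepeat WMMA tiles, and WmmaK is the K extent of one WMMA instruction, selected from the vector width K1. A worked example with hypothetical tile sizes (not taken from any particular instance of this kernel):

    #include <cstdio>

    int main()
    {
        // Hypothetical tuning parameters, chosen only to make the arithmetic concrete.
        constexpr int MPerBlock = 128, NPerBlock = 128;
        constexpr int MPerWmma  = 16,  NPerWmma  = 16;
        constexpr int MRepeat   = 4,   NRepeat   = 4;
        constexpr int K1        = 8;

        constexpr int MWaves = MPerBlock / (MRepeat * MPerWmma); // 128 / 64 = 2
        constexpr int NWaves = NPerBlock / (NRepeat * NPerWmma); // 128 / 64 = 2
        constexpr int WmmaK  = (K1 == 16) ? 32 : 16;             // 16 when K1 == 8

        std::printf("MWaves=%d NWaves=%d WmmaK=%d\n", MWaves, NWaves, WmmaK);
        return 0;
    }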
 
 144                                                               NumGemmKPrefetchStage,
 152         constexpr auto a_block_desc = [&]() {
 153             if constexpr(AEnableLds)
 156                 constexpr auto K0PerBlock    = KPerBlock / K1;
 157                 constexpr auto max_lds_align = K1;
 159                 if constexpr(ABlockLdsExtraM)
 173                 constexpr auto A_KRow        = I2;
 174                 constexpr auto KWmmaPerblock = KPerBlock / WmmaK;
 175                 constexpr auto K0PerWmma     = WmmaK / A_KRow / K1;
 
 200         constexpr auto b_block_desc = [&]() {
 201             if constexpr(BEnableLds)
 204                 constexpr auto K0PerBlock    = KPerBlock / K1;
 205                 constexpr auto max_lds_align = K1;
 207                 if constexpr(BBlockLdsExtraN)
 222                 constexpr auto B_KRow        = I2;
 223                 constexpr auto KWmmaPerblock = KPerBlock / WmmaK;
 224                 constexpr auto K0PerWmma     = WmmaK / B_KRow / K1;
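In the direct-to-register (no-LDS) branches above, the K axis is factored as KPerBlock = KWmmaPerblock * WmmaK, and one WMMA slice is further factored as WmmaK = K0PerWmma * KRow * K1 (with KRow = 2, the I2 constant). A small sketch of that decomposition, reusing the hypothetical numbers from the previous example:

    int main()
    {
        // Hypothetical values; A_KRow = 2 mirrors the I2 constant used above.
        constexpr int KPerBlock = 64, K1 = 8, A_KRow = 2;
        constexpr int WmmaK = (K1 == 16) ? 32 : 16;        // 16

        constexpr int KWmmaPerblock = KPerBlock / WmmaK;   // 64 / 16 = 4
        constexpr int K0PerWmma     = WmmaK / A_KRow / K1; // 16 / 2 / 8 = 1

        // The factors multiply back to the original extents.
        static_assert(KWmmaPerblock * WmmaK == KPerBlock, "K decomposition");
        static_assert(K0PerWmma * A_KRow * K1 == WmmaK, "WmmaK decomposition");
        return 0;
    }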
 
 249         constexpr auto a_block_copy_step = [&]() {
 250             if constexpr(AEnableLds)
 252                 constexpr auto K0PerBlock = KPerBlock / K1;
 258                 constexpr auto KWmmaPerBlock = KPerBlock / WmmaK;
 264         return a_block_copy_step;
 269         constexpr auto b_block_copy_step = [&]() {
 270             if constexpr(BEnableLds)
 272                 constexpr auto K0PerBlock = KPerBlock / K1;
 278                 constexpr auto KWmmaPerBlock = KPerBlock / WmmaK;
 284         return b_block_copy_step;
 
 288     template <typename ABlockDesc_>
 292         constexpr auto a_wave_desc = [&]() {
 293             if constexpr(AEnableLds)
 296                 constexpr auto A_K0 = ABlockDesc_{}.GetLength(I0);
 297                 constexpr auto A_K1 = ABlockDesc_{}.GetLength(I2);
 299                 constexpr auto A_KRow = I2;
 301                 constexpr auto A_KRow = I1;
 316                 constexpr auto KWmma     = ABlockDesc_{}.GetLength(I0);
 317                 constexpr auto K0PerWmma = ABlockDesc_{}.GetLength(I3);
 318                 constexpr auto A_KRow    = ABlockDesc_{}.GetLength(I4);
 319                 constexpr auto A_K1      = ABlockDesc_{}.GetLength(I6);
 
 352     template <typename BBlockDesc_>
 355         constexpr auto b_wave_desc = [&]() {
 356             if constexpr(BEnableLds)
 359                 constexpr auto B_K0 = BBlockDesc_{}.GetLength(I0);
 360                 constexpr auto B_K1 = BBlockDesc_{}.GetLength(I2);
 362                 constexpr auto B_KRow = I2;
 364                 constexpr auto B_KRow = I1;
 378                 constexpr auto KWmma     = BBlockDesc_{}.GetLength(I0);
 379                 constexpr auto K0PerWmma = BBlockDesc_{}.GetLength(I3);
 380                 constexpr auto B_KRow    = BBlockDesc_{}.GetLength(I4);
 381                 constexpr auto B_K1      = BBlockDesc_{}.GetLength(I6);
 
 396     __host__ __device__ static constexpr auto
 400         constexpr auto c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat =
 407         return c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat;
 
 411     template <typename Block2CTileMap>
 412     __host__ __device__ static constexpr bool CheckValidity(const AGridDesc& a_grid_desc,
 413                                                             const BGridDesc& b_grid_desc,
 414                                                             const CGridDesc_M_N& c_grid_desc_m_n,
 415                                                             const Block2CTileMap& block_2_ctile_map)
 418                       "wrong! K1 need to be known at compile-time");
 420         static_assert((MPerBlock % (MPerWmma * MRepeat) == 0) &&
 421                           (NPerBlock % (NRepeat * NPerWmma)) == 0,
 422                       "Invalid tuning param!");
 
 424         const auto GetAProblemsizeMK = [&]() {
 425             if constexpr(AEnableLds)
 428                                   a_grid_desc.GetLength(I0) * a_grid_desc.GetLength(I2));
 432                 return make_tuple(a_grid_desc.GetLength(I1) * a_grid_desc.GetLength(I2) *
 433                                       a_grid_desc.GetLength(I5),
 434                                   a_grid_desc.GetLength(I0) * a_grid_desc.GetLength(I3) *
 435                                       a_grid_desc.GetLength(I4) * a_grid_desc.GetLength(I6));
 439         const auto GetBProblemsizeNK = [&]() {
 440             if constexpr(BEnableLds)
 443                                   b_grid_desc.GetLength(I0) * b_grid_desc.GetLength(I2));
 447                 return make_tuple(b_grid_desc.GetLength(I1) * b_grid_desc.GetLength(I2) *
 448                                       b_grid_desc.GetLength(I5),
 449                                   b_grid_desc.GetLength(I0) * b_grid_desc.GetLength(I3) *
 450                                       b_grid_desc.GetLength(I4) * b_grid_desc.GetLength(I6));
 
 454         const auto M = GetAProblemsizeMK()[I0];
 455         const auto N = GetBProblemsizeNK()[I0];
 456         const auto K = GetAProblemsizeMK()[I1];
 458         if(!(M == c_grid_desc_m_n.GetLength(I0) && N == c_grid_desc_m_n.GetLength(I1) &&
 459              K == GetBProblemsizeNK()[I1]))
 461             printf("A: MxK = %d x %d, B: NxK = %d x %d, C: MxN = %d x %d\n",
 462                    GetAProblemsizeMK()[I0],
 463                    GetAProblemsizeMK()[I1],
 464                    GetBProblemsizeNK()[I0],
 465                    GetBProblemsizeNK()[I1],
 466                    c_grid_desc_m_n.GetLength(I0),
 467                    c_grid_desc_m_n.GetLength(I1));
 468             printf("GridwiseOp err: ProblemSize check");
 
 472         if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && K % KPerBlock == 0))
 474             printf("GridwiseOp err: ProblemSize division");
 479         const auto num_k_loop = K / KPerBlock;
 481         if(!GridwiseGemmPipe::IsSupported(num_k_loop))
 483             printf("GridwiseOp err: Pipeline not support this k_loop");
 487         if(!block_2_ctile_map.CheckValidity(c_grid_desc_m_n))
 495         if(!(a_grid_desc.GetElementSpaceSize() * sizeof(ADataType) <= TwoGB &&
 496              b_grid_desc.GetElementSpaceSize() * sizeof(BDataType) <= TwoGB))
 505         const index_t num_loop = K / KPerBlock;
 507         return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
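Stripped of the descriptor plumbing, CheckValidity boils down to integer divisibility tests on the problem size plus a 2 GB addressing limit on each input buffer, and CalculateHasMainKBlockLoop is derived from the same K / KPerBlock trip count. A host-side sketch of those checks, with plain integers standing in for what the grid descriptors report (the helper name and its argument list are illustrative):

    #include <cstdint>
    #include <cstdio>

    // Minimal stand-in for the divisibility and size checks above.
    bool check_validity(std::int64_t M, std::int64_t N, std::int64_t K,
                        std::int64_t MPerBlock, std::int64_t NPerBlock, std::int64_t KPerBlock,
                        std::int64_t a_bytes, std::int64_t b_bytes)
    {
        constexpr std::int64_t TwoGB = std::int64_t{1} << 31;

        // The block tile must evenly cover the problem.
        if(M % MPerBlock != 0 || N % NPerBlock != 0 || K % KPerBlock != 0)
        {
            std::printf("GridwiseOp err: ProblemSize division\n");
            return false;
        }

        // Each global buffer must stay within the 2 GB offset range.
        if(a_bytes > TwoGB || b_bytes > TwoGB)
        {
            return false;
        }

        return true;
    }

    int main()
    {
        // 4096 x 4096 x 4096 fp16 GEMM against 128 x 128 x 64 block tiles.
        const bool ok = check_validity(4096, 4096, 4096, 128, 128, 64,
                                       std::int64_t{4096} * 4096 * 2,
                                       std::int64_t{4096} * 4096 * 2);
        std::printf("valid: %d\n", ok);
        return 0;
    }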
 
 510     __host__ __device__ static constexpr auto
 513         const auto M = c_grid_desc_m_n.GetLength(I0);
 514         const auto N = c_grid_desc_m_n.GetLength(I1);
 516         const auto MBlock = M / MPerBlock;
 517         const auto NBlock = N / NPerBlock;
 526         return c_grid_desc_mblock_mperblock_nblock_nperblock;
 558                 .GetElementSpaceSize();
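The MBlock and NBlock factors computed above split C into a grid of block tiles; the default block-to-C-tile map typically assigns one workgroup per tile. The arithmetic, with hypothetical sizes:

    #include <cstdio>

    int main()
    {
        // Hypothetical problem and block-tile sizes.
        constexpr int M = 4096, N = 3072;
        constexpr int MPerBlock = 128, NPerBlock = 128;

        constexpr int MBlock = M / MPerBlock; // 32
        constexpr int NBlock = N / NPerBlock; // 24

        // With a one-to-one block-to-tile mapping this is the launch grid size.
        std::printf("MBlock=%d NBlock=%d grid=%d\n", MBlock, NBlock, MBlock * NBlock);
        return 0;
    }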
 
 574     template <bool HasMainKBlockLoop, typename Block2CTileMap = DefaultBlock2CTileMap>
 575     __device__ static void Run(const ADataType* __restrict__ p_a_grid,
 576                                const BDataType* __restrict__ p_b_grid,
 577                                CDataType* __restrict__ p_c_grid,
 578                                void* __restrict__ p_shared,
 579                                const AGridDesc& a_grid_desc,
 580                                const BGridDesc& b_grid_desc,
 582                                    c_grid_desc_mblock_mperblock_nblock_nperblock,
 583                                const AElementwiseOperation& a_element_op,
 584                                const BElementwiseOperation& b_element_op,
 585                                const CElementwiseOperation& c_element_op,
 586                                const Block2CTileMap& block_2_ctile_map)
 
 591         const auto a_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
 592             p_a_grid, a_grid_desc.GetElementSpaceSize());
 593         const auto b_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
 594             p_b_grid, b_grid_desc.GetElementSpaceSize());
 595         auto c_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
 596             p_c_grid, c_grid_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
 601         if(!block_2_ctile_map.ValidCTileIndex(
 603                make_tuple(c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0),
 604                           c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2))))
 
 608         const index_t m_block_data_idx_on_grid = __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);
 609         const index_t n_block_data_idx_on_grid = __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);
 613         const auto K = [&](){
 614             if constexpr(AEnableLds){
 615                 return a_grid_desc.GetLength(I0) * a_grid_desc.GetLength(I2);
 618                 return a_grid_desc.GetLength(I0) * a_grid_desc.GetLength(I3)
 619                         * a_grid_desc.GetLength(I4) * a_grid_desc.GetLength(I6);
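At the start of Run, the workgroup turns its tile index from block_2_ctile_map into the origin of its output tile by scaling with MPerBlock and NPerBlock (wrapped in __builtin_amdgcn_readfirstlane so the result is provably wavefront-uniform); the reduction size K is then reassembled from the A descriptor lengths. The index arithmetic itself is just this, shown with hypothetical tile indices:

    #include <cstdio>

    int main()
    {
        // Hypothetical tile indices as returned by the block-to-C-tile map.
        constexpr int block_work_idx_m = 3, block_work_idx_n = 5;
        constexpr int MPerBlock = 128, NPerBlock = 128;

        // Origin of this workgroup's C tile along M and N.
        constexpr int m_block_data_idx_on_grid = block_work_idx_m * MPerBlock; // 384
        constexpr int n_block_data_idx_on_grid = block_work_idx_n * NPerBlock; // 640

        std::printf("tile origin = (%d, %d)\n",
                    m_block_data_idx_on_grid, n_block_data_idx_on_grid);
        return 0;
    }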
 
 626         auto a_block_trait = [&](){
 628             if constexpr(AEnableLds)
 630                 constexpr auto K0PerBlock = KPerBlock / K1;
 631                 auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
 632                     static_cast<ADataType*>(p_shared),
 635                 auto a_blockwise_copy =
 637                                                         AElementwiseOperation,
 641                                                         ABlockTransferThreadClusterLengths_K0_M_K1,
 642                                                         ABlockTransferThreadClusterArrangeOrder,
 645                                                         decltype(a_grid_desc),
 646                                                         decltype(a_block_desc),
 647                                                         ABlockTransferSrcAccessOrder,
 649                                                         ABlockTransferSrcVectorDim,
 651                                                         ABlockTransferSrcScalarPerVector,
 652                                                         ABlockTransferDstScalarPerVector_K1,
 655                                                         AThreadTransferSrcResetCoordinateAfterRun,
 657                                                         NumGemmKPrefetchStage>(
 665                 return make_tuple(a_block_buf, a_blockwise_copy);
 671                 constexpr auto KWmmaPerBlock = KPerBlock / WmmaK;
 672                 constexpr auto K0PerWmma     = WmmaK / 2 / K1Value;
 673                 auto a_block_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ADataType>(
 674                     a_block_desc.GetElementSpaceSize());
 677                 auto a_blockwise_copy =
 680                                                      decltype(a_grid_desc),
 681                                                      decltype(a_block_desc),
 691                                                      ABlockTransferSrcScalarPerVector,
 692                                                      AThreadTransferSrcResetCoordinateAfterRun,
 696                                      m_block_data_idx_on_grid / (MWaves * MPerWmma),
 703                 return make_tuple(a_block_buf, a_blockwise_copy);
 
 707         auto b_block_trait = [&](){
 708             if constexpr(BEnableLds)
 710                 constexpr auto K0PerBlock = KPerBlock / K1;
 711                 auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
 715                 auto b_blockwise_copy =
 717                                                         BElementwiseOperation,
 721                                                         BBlockTransferThreadClusterLengths_K0_N_K1,
 722                                                         BBlockTransferThreadClusterArrangeOrder,
 725                                                         decltype(b_grid_desc),
 726                                                         decltype(b_block_desc),
 727                                                         BBlockTransferSrcAccessOrder,
 729                                                         BBlockTransferSrcVectorDim,
 731                                                         BBlockTransferSrcScalarPerVector,
 732                                                         BBlockTransferDstScalarPerVector_K1,
 735                                                         BThreadTransferSrcResetCoordinateAfterRun,
 737                                                         NumGemmKPrefetchStage>(
 745                 return make_tuple(b_block_buf, b_blockwise_copy);
 751                 constexpr auto KWmmaPerBlock = KPerBlock / WmmaK;
 752                 constexpr auto K0PerWmma     = WmmaK / 2 / K1Value;
 753                 auto b_block_buf = make_static_buffer<AddressSpaceEnum::Vgpr, BDataType>(
 754                     b_block_desc.GetElementSpaceSize());
 757                 auto b_blockwise_copy =
 760                                                      decltype(b_grid_desc),
 761                                                      decltype(b_block_desc),
 771                                                      BBlockTransferSrcScalarPerVector,
 772                                                      BThreadTransferSrcResetCoordinateAfterRun,
 776                                      n_block_data_idx_on_grid / (NWaves * NPerWmma),
 783                 return make_tuple(b_block_buf, b_blockwise_copy);
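a_block_trait and b_block_trait choose, entirely at compile time, between an LDS-staged path (dynamic LDS buffer plus a thread-group transfer) and a direct-to-register path (static VGPR buffer plus a per-thread transfer), and hand back whichever (buffer, copy) pair applies. The selection idiom is an if constexpr inside a lambda that returns a tuple; a minimal self-contained sketch of the same idiom, where the buffer types are stand-ins, not the CK classes:

    #include <cstdio>
    #include <tuple>

    struct LdsBuffer  { static constexpr const char* where = "LDS";  };
    struct VgprBuffer { static constexpr const char* where = "VGPR"; };

    // Returns a (buffer, tag) pair whose types depend on the compile-time flag,
    // mirroring how a_block_trait()/b_block_trait() select their copy engines.
    template <bool EnableLds>
    constexpr auto make_block_trait()
    {
        if constexpr(EnableLds)
        {
            return std::make_tuple(LdsBuffer{}, 0);
        }
        else
        {
            return std::make_tuple(VgprBuffer{}, 1);
        }
    }

    int main()
    {
        auto a_trait = make_block_trait<true>();
        auto b_trait = make_block_trait<false>();
        std::printf("A staged in %s, B staged in %s\n",
                    std::get<0>(a_trait).where, std::get<0>(b_trait).where);
        return 0;
    }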
 
 787         auto a_block_buf       = a_block_trait()[I0];
 788         auto a_blockwise_copy  = a_block_trait()[I1];
 790         auto b_block_buf       = b_block_trait()[I0];
 791         auto b_blockwise_copy  = b_block_trait()[I1];
 796         auto blockwise_gemm =
 823         const index_t KBlockMainLoop = __builtin_amdgcn_readfirstlane(K / KPerBlock);
 824         GridwiseGemmPipe::template Run<HasMainKBlockLoop>(a_grid_desc,
 829                                                           a_block_slice_copy_step,
 835                                                           b_block_slice_copy_step,
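The main loop is driven for KBlockMainLoop = K / KPerBlock iterations; in each one the pipeline advances the A and B copy windows by the slice-copy steps passed in above and feeds one freshly staged tile pair to the blockwise WMMA GEMM. A schematic of the trip-count arithmetic and loop shape; the loop body here is descriptive only, the real work lives in GridwiseGemmPipe::Run:

    #include <cstdio>

    int main()
    {
        // Hypothetical extents along K.
        constexpr int K = 4096, KPerBlock = 64;

        constexpr int KBlockMainLoop = K / KPerBlock; // 64 iterations

        for(int k_loop = 0; k_loop < KBlockMainLoop; ++k_loop)
        {
            // 1. copy the next A/B block tiles (global -> LDS or VGPR)
            // 2. synchronize the workgroup if LDS staging is used
            // 3. run the blockwise WMMA GEMM on the staged tiles
            // 4. advance the copy windows by one KPerBlock slice
        }

        std::printf("k-loop trip count = %d\n", KBlockMainLoop);
        return 0;
    }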
 
 843             constexpr auto c_thread_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs =
 844                 blockwise_gemm.GetCThreadDescriptor_MRepeat_MWave_MSubGroup_NRepeat_NWave_NThreadPerSubGroup_MAccVgprs();
 847             constexpr auto c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp =
 848                 blockwise_gemm.GetCBlockDescriptor_MRepeat_MWave_MSubGroup_NRepeat_NWave_NThreadPerSubGroup_MAccVgprs();
 850             constexpr auto MWave              = c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp.GetLength(I1);
 851             constexpr auto MSubGroup          = c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp.GetLength(I2);
 852             constexpr auto NWave              = c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp.GetLength(I4);
 853             constexpr auto NThreadPerSubGroup = c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp.GetLength(I5);
 854             constexpr auto MAccVgprs          = c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs_tmp.GetLength(I6);
 857             constexpr auto c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat =
 860             auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
 865                 c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat,
 877                         NThreadPerSubGroup))),
 
 883             const auto c_thread_mtx_on_block = blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0);
 885             const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
 886             const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];
 888             const auto m_thread_data_on_block_to_mrepeat_mwave_msubgroup_maccvgprs_adaptor =
 894             const auto n_thread_data_on_block_to_nrepeat_nwave_nthreadpersubgroup_adaptor =
 900             const auto m_thread_data_on_block_idx = m_thread_data_on_block_to_mrepeat_mwave_msubgroup_maccvgprs_adaptor.CalculateBottomIndex(
 903             const auto n_thread_data_on_block_idx = n_thread_data_on_block_to_nrepeat_nwave_nthreadpersubgroup_adaptor.CalculateBottomIndex(
 907             auto c_thread_copy_vgpr_to_lds =
 910                                                    decltype(c_thread_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs),
 911                                                    decltype(c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs),
 916                                                             CShuffleNRepeatPerShuffle,
 926                     c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs,
 928                                      m_thread_data_on_block_idx[I1],
 929                                      m_thread_data_on_block_idx[I2],
 931                                      n_thread_data_on_block_idx[I1],
 932                                      n_thread_data_on_block_idx[I2],
 933                                      m_thread_data_on_block_idx[I3]),
 
 939                 CElementwiseOperation,
 940                 CGlobalMemoryDataOperation,
 942                          CShuffleMRepeatPerShuffle * MWave * MPerWmma,
 944                          CShuffleNRepeatPerShuffle * NWave * NPerWmma>,
 945                 CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
 949                 decltype(c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat),
 950                 decltype(c_grid_desc_mblock_mperblock_nblock_nperblock),
 953                 CShuffleBlockTransferScalarPerVector_NPerBlock,
 956                 {c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat,
 958                  c_grid_desc_mblock_mperblock_nblock_nperblock,
 
 964             constexpr auto sfc_c_vgpr =
 970                                            CShuffleNRepeatPerShuffle,
 976             constexpr auto sfc_c_global =
 980                                            CShuffleMRepeatPerShuffle * MWave * MPerWmma,
 982                                            CShuffleNRepeatPerShuffle * NWave * NPerWmma>>{};
 984             constexpr index_t num_access = sfc_c_vgpr.GetNumOfAccess();
 986             static_assert(num_access == sfc_c_global.GetNumOfAccess(), "wrong!");
 993                 c_thread_copy_vgpr_to_lds.Run(c_thread_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs,
 994                                               sfc_c_vgpr.GetIndexTupleOfNumber(access_id),
 996                                               c_block_desc_mrepeat_mwave_msubgroup_nrepeat_nwave_nthreadpersubgroup_maccvgprs,
 997                                               c_shuffle_block_buf);
1003                 c_shuffle_block_copy_lds_to_global.Run(
1004                     c_shuffle_block_desc_mshrepeat_mpershrepeat_nshrepeat_npershrepeat,
1005                     c_shuffle_block_buf,
1006                     c_grid_desc_mblock_mperblock_nblock_nperblock,
1009                 if constexpr(access_id < num_access - 1)
1011                     constexpr auto c_global_step = sfc_c_global.GetForwardStep(access_id);
1014                     c_shuffle_block_copy_lds_to_global.MoveDstSliceWindow(
1015                         c_grid_desc_mblock_mperblock_nblock_nperblock, c_global_step);
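The epilogue alternates between the two space-filling curves: for every access index the accumulator slice is copied from VGPRs into the shared-memory shuffle buffer, the workgroup synchronizes, the reorganized slice is written to global memory, and the destination window is advanced by sfc_c_global.GetForwardStep until all num_access slices are out. A schematic of that loop shape; the placeholder functions only trace the structure and stand in for the CK transfer objects:

    #include <cstdio>

    // Placeholders that only trace the loop structure.
    void copy_vgpr_to_lds(int access_id)   { std::printf("vgpr->lds   step %d\n", access_id); }
    void copy_lds_to_global(int access_id) { std::printf("lds->global step %d\n", access_id); }
    void move_dst_window(int access_id)    { std::printf("advance dst after %d\n", access_id); }

    int main()
    {
        // Hypothetical access count; in the kernel it comes from sfc_c_vgpr.GetNumOfAccess().
        constexpr int num_access = 4;

        for(int access_id = 0; access_id < num_access; ++access_id)
        {
            copy_vgpr_to_lds(access_id);   // stage one shuffle slice into LDS
            // a workgroup barrier (block_sync_lds) sits here so the whole slice is visible
            copy_lds_to_global(access_id); // write the reorganized slice out

            if(access_id < num_access - 1)
            {
                move_dst_window(access_id); // step the global window along the curve
            }
        }
        return 0;
    }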
 