template <typename AsDataType,
          // ...
          typename AComputeDataType_,
          // ...
          typename CShuffleDataType,
          // ...
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CDEElementwiseOperation,
          // ...
          typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
          typename ABlockTransferThreadClusterArrangeOrder,
          typename ABlockTransferSrcAccessOrder,
          index_t ABlockTransferSrcVectorDim,
          index_t ABlockTransferSrcScalarPerVector,
          index_t ABlockTransferDstScalarPerVector_AK1,
          bool AThreadTransferSrcResetCoordinateAfterRun,
          // ...
          typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
          typename BBlockTransferThreadClusterArrangeOrder,
          typename BBlockTransferSrcAccessOrder,
          index_t BBlockTransferSrcVectorDim,
          index_t BBlockTransferSrcScalarPerVector,
          index_t BBlockTransferDstScalarPerVector_BK1,
          bool BThreadTransferSrcResetCoordinateAfterRun,
          // ...
          index_t CShuffleMXdlPerWavePerShuffle,
          index_t CShuffleNXdlPerWavePerShuffle,
          typename CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
          index_t CDEShuffleBlockTransferScalarPerVector_NPerBlock,
          // ...
          typename BComputeDataType_ = AComputeDataType_>
struct GridwiseGemmMultipleABD_xdl_cshuffle
{
    // ... (NumATensor/NumBTensor/NumDTensor, I0..I7, AK1/BK1 and the other
    // compile-time constants)
    using GridwiseGemmPipe = remove_cvref_t<
        decltype(GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>())>;
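    // Roughly: PipelineVer selects the pipelining scheme for the main K loop,
    // NumGemmKPrefetchStage the number of in-flight K tiles, and LoopSched the
    // loop schedule (selector defined in gridwise_gemm_pipeline_selector.hpp).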
#if CK_GFX90A_DENORM_WORKAROUND
    // ...
#endif
    __host__ __device__ static constexpr auto
    GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock()
    {
        constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
        constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

        constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
            make_naive_tensor_descriptor_packed(
                make_tuple(I1,
                           Number<CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl>{},
                           I1,
                           Number<CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>{}));

        return c_shuffle_block_desc_mblock_mperblock_nblock_nperblock;
    }
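    // Worked example for the wave split above (hypothetical tuning values, not
    // taken from this header): MPerBlock = 256, MXdlPerWave = 4, MPerXdl = 32
    // gives MWave = 256 / (4 * 32) = 2 waves along M; NPerBlock = 128,
    // NXdlPerWave = 2, NPerXdl = 32 gives NWave = 128 / (2 * 32) = 2 waves
    // along N. The C-shuffle LDS tile then spans
    // CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl rows per shuffle pass.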
    static constexpr auto MakeAsGridPointer()
    {
        return generate_tuple(
            [&](auto i) {
                using ADataType = remove_cvref_t<tuple_element_t<i.value, AsDataType>>;

                return static_cast<const ADataType*>(nullptr);
            },
            Number<NumATensor>{});
    }

    // MakeBsGridPointer() and MakeDsGridPointer() follow the same pattern,
    // yielding per-tensor typed null pointers:
    //     return static_cast<const BDataType*>(nullptr);
    //     return static_cast<const DDataType*>(nullptr);
    __host__ __device__ static constexpr index_t GetSharedMemoryNumberOfByte()
    {
        constexpr auto max_lds_align = math::lcm(AK1, BK1);

        constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
            a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);

        constexpr auto b_block_space_size_aligned = math::integer_least_multiple(
            b_block_desc_bk0_n_bk1.GetElementSpaceSize(), max_lds_align);

        // LDS allocation for the C-shuffle staging buffer
        constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
            GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock();

        constexpr auto c_block_size =
            c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize();

        return math::max((a_block_space_size_aligned * sizeof(AComputeDataType) +
                          b_block_space_size_aligned * sizeof(BComputeDataType)),
                         c_block_size * sizeof(CShuffleDataType));
    }
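    // Worked example of the sizing rule above (hypothetical fp16 configuration):
    // with KPerBlock = 32, MPerBlock = 256, NPerBlock = 128, the A tile holds
    // 32 * 256 = 8192 elements (16 KiB) and the B tile 32 * 128 = 4096 elements
    // (8 KiB); a 128 x 128 C-shuffle tile holds 16384 elements (32 KiB). A and B
    // coexist in LDS during the main loop, while the C-shuffle buffer reuses the
    // same LDS afterwards, so the kernel requests max(16 + 8, 32) = 32 KiB.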
    template <typename AGridDesc_M_K>
    __host__ __device__ static constexpr auto
    MakeDefaultAGridDescriptor_AK0_M_AK1(const AGridDesc_M_K& a_grid_desc_m_k)
    {
        const auto M = a_grid_desc_m_k.GetLength(I0);
        const auto K = a_grid_desc_m_k.GetLength(I1);

        const auto AK0 = K / AK1;

        return transform_tensor_descriptor(
            a_grid_desc_m_k,
            make_tuple(make_unmerge_transform(make_tuple(AK0, AK1)),
                       make_pass_through_transform(M)),
            make_tuple(Sequence<1>{}, Sequence<0>{}),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}));
    }
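    // Example of the K split above (hypothetical sizes): K = 4096 with AK1 = 8
    // gives AK0 = 512, i.e. the M x K view is re-expressed as AK0 x M x AK1 so
    // that AK1 contiguous elements can be loaded per vector access.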
    template <typename AsGridDesc_M_K>
    __host__ __device__ static constexpr auto
    MakeDefaultAsGridDescriptor_AK0_M_AK1(const AsGridDesc_M_K& as_grid_desc_m_k)
    {
        return generate_tuple(
            [&](auto i) { return MakeDefaultAGridDescriptor_AK0_M_AK1(as_grid_desc_m_k[i]); },
            Number<NumATensor>{});
    }
    template <typename BGridDesc_N_K>
    __host__ __device__ static constexpr auto
    MakeDefaultBGridDescriptor_BK0_N_BK1(const BGridDesc_N_K& b_grid_desc_n_k)
    {
        const auto N = b_grid_desc_n_k.GetLength(I0);
        const auto K = b_grid_desc_n_k.GetLength(I1);

        const auto BK0 = K / BK1;

        return transform_tensor_descriptor(
            b_grid_desc_n_k,
            make_tuple(make_unmerge_transform(make_tuple(BK0, BK1)),
                       make_pass_through_transform(N)),
            make_tuple(Sequence<1>{}, Sequence<0>{}),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}));
    }
    template <typename BsGridDesc_N_K>
    __host__ __device__ static constexpr auto
    MakeDefaultBsGridDescriptor_BK0_N_BK1(const BsGridDesc_N_K& bs_grid_desc_n_k)
    {
        return generate_tuple(
            [&](auto i) { return MakeDefaultBGridDescriptor_BK0_N_BK1(bs_grid_desc_n_k[i]); },
            Number<NumBTensor>{});
    }
    template <typename EGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const EGridDesc_M_N& e_grid_desc_m_n)
    {
        const auto M = e_grid_desc_m_n.GetLength(I0);
        const auto N = e_grid_desc_m_n.GetLength(I1);

        const auto MBlock = M / MPerBlock;
        const auto NBlock = N / NPerBlock;

        const auto e_grid_desc_mblock_mperblock_nblock_nperblock = transform_tensor_descriptor(
            e_grid_desc_m_n,
            make_tuple(make_unmerge_transform(make_tuple(MBlock, Number<MPerBlock>{})),
                       make_unmerge_transform(make_tuple(NBlock, Number<NPerBlock>{}))),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0, 1>{}, Sequence<2, 3>{}));

        return e_grid_desc_mblock_mperblock_nblock_nperblock;
    }
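    // Example (hypothetical sizes): M = 4096, N = 3072 with MPerBlock = 256 and
    // NPerBlock = 128 gives MBlock = 16 and NBlock = 24, i.e. a 16 x 24 grid of
    // output tiles for the Block2ETileMap to distribute over workgroups.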
    template <typename DsGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeDsGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const DsGridDesc_M_N& ds_grid_desc_m_n)
    {
        return generate_tuple(
            [&](auto i) {
                return MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n[i]);
            },
            Number<NumDTensor>{});
    }
    // return block_id to E-matrix tile index (m0, n0) mapping
    template <typename EGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeDefaultBlock2ETileMap(const EGridDesc_M_N& e_grid_desc_m_n)
    {
        // builds the default block -> E-tile map (see block_to_ctile_map.hpp)
        // ...
    }
    template <typename AsGridDesc_M_K,
              typename BsGridDesc_N_K,
              typename DsGridDesc_M_N,
              typename EGridDesc_M_N,
              typename Block2ETileMap>
    __host__ __device__ static constexpr bool
    CheckValidity(const AsGridDesc_M_K& as_grid_desc_m_k,
                  const BsGridDesc_N_K& bs_grid_desc_n_k,
                  const DsGridDesc_M_N& ds_grid_desc_m_n,
                  const EGridDesc_M_N& e_grid_desc_m_n,
                  const Block2ETileMap& block_2_etile_map)
    {
        static_assert((MPerBlock % (MPerXdl * MXdlPerWave) == 0) &&
                          (NPerBlock % (NXdlPerWave * NPerXdl)) == 0,
                      "Invalid tuning param!");
        static_assert(KPerBlock % AK1Value == 0 && KPerBlock % BK1Value == 0,
                      "KPerBlock must be divisible by AK1Value and BK1Value!");

        // problem sizes are read from tensor 0; every other A/B/D descriptor
        // must agree with them
        const auto M  = as_grid_desc_m_k[I0].GetLength(I0);
        const auto N  = bs_grid_desc_n_k[I0].GetLength(I0);
        const auto AK = as_grid_desc_m_k[I0].GetLength(I1);
        const auto BK = bs_grid_desc_n_k[I0].GetLength(I1);

        if(!(M == e_grid_desc_m_n.GetLength(I0) && N == e_grid_desc_m_n.GetLength(I1) &&
             AK == BK))
        {
            return false;
        }

        // each tensor must stay below the 2 GB addressing limit
        constexpr long_index_t TwoGB = (long_index_t{1} << 31);

        bool valid = true;

        static_for<0, NumATensor, 1>{}([&](auto i) {
            using ADataType = remove_cvref_t<tuple_element_t<i.value, AsDataType>>;

            valid = valid &&
                    (as_grid_desc_m_k[i].GetElementSpaceSize() * sizeof(ADataType) <= TwoGB);
            valid = valid && (M == as_grid_desc_m_k[i].GetLength(I0) &&
                              AK == as_grid_desc_m_k[i].GetLength(I1));
        });

        static_for<0, NumBTensor, 1>{}([&](auto i) {
            using BDataType = remove_cvref_t<tuple_element_t<i.value, BsDataType>>;

            valid = valid &&
                    (bs_grid_desc_n_k[i].GetElementSpaceSize() * sizeof(BDataType) <= TwoGB);
            valid = valid && (N == bs_grid_desc_n_k[i].GetLength(I0) &&
                              BK == bs_grid_desc_n_k[i].GetLength(I1));
        });

        static_for<0, NumDTensor, 1>{}([&](auto i) {
            valid = valid && (M == ds_grid_desc_m_n[i].GetLength(I0) &&
                              N == ds_grid_desc_m_n[i].GetLength(I1));
        });

        if(!valid)
        {
            return false;
        }

        if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && AK % KPerBlock == 0))
        {
            return false;
        }

        // check gridwise gemm pipeline
        const auto num_k_loop = AK / KPerBlock;

        if(!GridwiseGemmPipe::IsSupported(num_k_loop))
        {
            return false;
        }

        if(!block_2_etile_map.CheckValidity(e_grid_desc_m_n))
        {
            return false;
        }

        if(!(e_grid_desc_m_n.GetElementSpaceSize() * sizeof(EDataType) <= TwoGB))
        {
            return false;
        }

        return true;
    }

    __host__ __device__ static constexpr bool CalculateHasMainKBlockLoop(index_t K)
    {
        const index_t num_loop = K / KPerBlock;

        return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
    }
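    // Host-side usage sketch (hypothetical call site; the device op wrapping
    // this struct is not part of this header excerpt):
    //
    //   if(!GridwiseGemm::CheckValidity(as_grid_desc_m_k, bs_grid_desc_n_k,
    //                                   ds_grid_desc_m_n, e_grid_desc_m_n,
    //                                   block_2_etile_map))
    //       return false; // reject this problem shape
    //
    //   const bool has_main_loop = GridwiseGemm::CalculateHasMainKBlockLoop(K);
    //   // has_main_loop selects which Run<HasMainKBlockLoop> instantiation
    //   // of the kernel gets launched.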
    template <typename ALayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto
    MakeAGridDescriptor_M_K(index_t MRaw, index_t KRaw, index_t StrideA)
    {
        constexpr auto matrix_padder =
            MatrixPadder<GemmSpec, index_t, index_t, index_t>{MPerBlock, NPerBlock, KPerBlock};

        const auto a_grid_desc_mraw_kraw = [&]() {
            if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw),
                                                    make_tuple(StrideA, I1));
            }
            else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(MRaw, KRaw),
                                                    make_tuple(I1, StrideA));
            }
        }();

        return matrix_padder.PadADescriptor_M_K(a_grid_desc_mraw_kraw);
    }
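    // Example of the padding step (hypothetical sizes): with a padding
    // GemmSpec such as MKPadding and MRaw = 1000, MPerBlock = 256, the padder
    // right-extends M to 1024 so that the M % MPerBlock == 0 requirement in
    // CheckValidity can hold; with GemmSpec = Default the raw descriptor is
    // returned unpadded.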
    template <typename AsLayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto MakeAsGridDescriptor_M_K(
#ifdef CK_CODE_GEN_RTC
        // ...
#else
        const std::array<index_t, NumATensor>& MRaws,
        const std::array<index_t, NumATensor>& KRaws,
        const std::array<index_t, NumATensor>& AsStride
#endif
    )
    {
        return generate_tuple(
            [&](auto i) {
                using ALayout = remove_cvref_t<tuple_element_t<i.value, AsLayout>>;

                return MakeAGridDescriptor_M_K<ALayout, GemmSpec>(MRaws[i], KRaws[i], AsStride[i]);
            },
            Number<NumATensor>{});
    }
    template <typename BLayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto
    MakeBGridDescriptor_N_K(const index_t NRaw, const index_t KRaw, const index_t StrideB)
    {
        constexpr auto matrix_padder =
            MatrixPadder<GemmSpec, index_t, index_t, index_t>{MPerBlock, NPerBlock, KPerBlock};

        const auto b_grid_desc_nraw_kraw = [&]() {
            if constexpr(is_same_v<tensor_layout::gemm::RowMajor, BLayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw),
                                                    make_tuple(I1, StrideB));
            }
            else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, BLayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(NRaw, KRaw),
                                                    make_tuple(StrideB, I1));
            }
        }();

        return matrix_padder.PadBDescriptor_N_K(b_grid_desc_nraw_kraw);
    }
    template <typename BsLayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto MakeBsGridDescriptor_N_K(
#ifdef CK_CODE_GEN_RTC
        // ...
#else
        const std::array<index_t, NumBTensor>& NRaws,
        const std::array<index_t, NumBTensor>& KRaws,
        const std::array<index_t, NumBTensor>& BsStride
#endif
    )
    {
        return generate_tuple(
            [&](auto i) {
                using BLayout = remove_cvref_t<tuple_element_t<i.value, BsLayout>>;

                return MakeBGridDescriptor_N_K<BLayout, GemmSpec>(NRaws[i], KRaws[i], BsStride[i]);
            },
            Number<NumBTensor>{});
    }
    template <typename ELayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto
    MakeEGridDescriptor_M_N(index_t MRaw, index_t NRaw, index_t StrideE)
    {
        constexpr auto matrix_padder =
            MatrixPadder<GemmSpec, index_t, index_t, index_t>{MPerBlock, NPerBlock, KPerBlock};

        const auto e_grid_desc_mraw_nraw = [&]() {
            if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ELayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw),
                                                    make_tuple(StrideE, I1));
            }
            else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ELayout>)
            {
                return make_naive_tensor_descriptor(make_tuple(MRaw, NRaw),
                                                    make_tuple(I1, StrideE));
            }
        }();

        return matrix_padder.PadCDescriptor_M_N(e_grid_desc_mraw_nraw);
    }
    template <typename DsLayout, GemmSpecialization GemmSpec>
    __host__ __device__ static auto MakeDsGridDescriptor_M_N(
#ifdef CK_CODE_GEN_RTC
        // ...
#else
        const std::array<index_t, NumDTensor>& MRaws,
        const std::array<index_t, NumDTensor>& NRaws,
        const std::array<index_t, NumDTensor>& DsStride
#endif
    )
    {
        return generate_tuple(
            [&](auto i) {
                using DLayout = remove_cvref_t<tuple_element_t<i.value, DsLayout>>;

                return MakeEGridDescriptor_M_N<DLayout, GemmSpec>(MRaws[i], NRaws[i], DsStride[i]);
            },
            Number<NumDTensor>{});
    }
    __device__ __host__ static constexpr auto GetMPerBlock() { return MPerBlock; }
    template <bool HasMainKBlockLoop,
              typename AsGridDesc_AK0_M_AK1,
              typename BsGridDesc_BK0_N_BK1,
              typename DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
              typename EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock,
              typename Block2ETileMap>
    static __device__ void Run(AsGridPointer p_as_grid,
                               BsGridPointer p_bs_grid,
                               DsGridPointer p_ds_grid,
                               EDataType* __restrict__ p_e_grid,
                               void* __restrict__ p_shared,
                               const AElementwiseOperation& a_element_op,
                               const BElementwiseOperation& b_element_op,
                               const CDEElementwiseOperation& cde_element_op,
                               const AsGridDesc_AK0_M_AK1 as_grid_desc_ak0_m_ak1,
                               const BsGridDesc_BK0_N_BK1 bs_grid_desc_bk0_n_bk1,
                               const DsGridDesc_MBlock_MPerBlock_NBlock_NPerBlock&
                                   ds_grid_desc_mblock_mperblock_nblock_nperblock,
                               const EGridDesc_MBlock_MPerBlock_NBlock_NPerBlock&
                                   e_grid_desc_mblock_mperblock_nblock_nperblock,
                               const Block2ETileMap& block_2_etile_map)
    {
        // wrap the raw pointers in dynamic buffers, one per input tensor
        const auto as_grid_buf = generate_tuple(
            [&](auto i) {
                return make_dynamic_buffer<AddressSpaceEnum::Global>(
                    p_as_grid[i], as_grid_desc_ak0_m_ak1[i].GetElementSpaceSize());
            },
            Number<NumATensor>{});

        const auto bs_grid_buf = generate_tuple(
            [&](auto i) {
                return make_dynamic_buffer<AddressSpaceEnum::Global>(
                    p_bs_grid[i], bs_grid_desc_bk0_n_bk1[i].GetElementSpaceSize());
            },
            Number<NumBTensor>{});

        const auto ds_grid_buf = generate_tuple(
            [&](auto i) {
                return make_dynamic_buffer<AddressSpaceEnum::Global>(
                    p_ds_grid[i],
                    ds_grid_desc_mblock_mperblock_nblock_nperblock[i].GetElementSpaceSize());
            },
            Number<NumDTensor>{});

        auto e_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_e_grid, e_grid_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());

        // map this workgroup onto its (MBlock, NBlock) output tile
        const auto block_work_idx =
            block_2_etile_map.CalculateBottomIndex(make_multi_index(get_block_1d_id()));

        if(!block_2_etile_map.ValidCTileIndex(
               block_work_idx,
               make_tuple(e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0),
                          e_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2))))
        {
            return;
        }

        // readfirstlane keeps the wave-uniform tile origins in SGPRs
        const index_t m_block_data_idx_on_grid =
            __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);

        const index_t n_block_data_idx_on_grid =
            __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);

        // LDS tile descriptors for A and B
        constexpr auto a_block_desc_ak0_m_ak1 = GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1();
        constexpr auto b_block_desc_bk0_n_bk1 = GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1();

        // A matrix blockwise copy: global -> LDS
        const auto idx_as_block_begin =
            generate_tuple([&](auto) { return make_multi_index(0, m_block_data_idx_on_grid, 0); },
                           Number<NumATensor>{});

        auto a_blockwise_copy = ThreadGroupTensorSliceTransfer_v7r2<
            // ...
            decltype(as_grid_desc_ak0_m_ak1),
            decltype(tie(a_block_desc_ak0_m_ak1)),
            AElementwiseOperation,
            // ...
            ABlockTransferThreadClusterLengths_AK0_M_AK1,
            ABlockTransferThreadClusterArrangeOrder,
            ABlockTransferSrcAccessOrder,
            // ...
            ABlockTransferSrcVectorDim,
            // ...
            ABlockTransferSrcScalarPerVector,
            ABlockTransferDstScalarPerVector_AK1,
            // ...
            >(as_grid_desc_ak0_m_ak1,
              idx_as_block_begin,
              tie(a_block_desc_ak0_m_ak1),
              // ...
              );

        // B matrix blockwise copy: global -> LDS
        const auto idx_bs_block_begin =
            generate_tuple([&](auto) { return make_multi_index(0, n_block_data_idx_on_grid, 0); },
                           Number<NumBTensor>{});

        auto b_blockwise_copy = ThreadGroupTensorSliceTransfer_v7r2<
            // ...
            decltype(bs_grid_desc_bk0_n_bk1),
            decltype(tie(b_block_desc_bk0_n_bk1)),
            BElementwiseOperation,
            // ...
            BBlockTransferThreadClusterLengths_BK0_N_BK1,
            BBlockTransferThreadClusterArrangeOrder,
            BBlockTransferSrcAccessOrder,
            // ...
            BBlockTransferSrcVectorDim,
            // ...
            BBlockTransferSrcScalarPerVector,
            BBlockTransferDstScalarPerVector_BK1,
            // ...
            >(bs_grid_desc_bk0_n_bk1,
              idx_bs_block_begin,
              tie(b_block_desc_bk0_n_bk1),
              // ...
              );

        // blockwise GEMM: consumes the LDS tiles, accumulates into VGPRs
        auto blockwise_gemm = BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector<
            // ...
            decltype(a_block_desc_ak0_m_ak1),
            decltype(b_block_desc_bk0_n_bk1),
            // ...
            >();

        auto c_thread_buf = blockwise_gemm.GetCThreadBuffer();

        // carve the A and B tiles out of the shared LDS allocation
        constexpr auto max_lds_align = math::lcm(AK1, BK1);

        constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
            a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);

        auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
            static_cast<AComputeDataType*>(p_shared),
            a_block_desc_ak0_m_ak1.GetElementSpaceSize());

        auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
            static_cast<BComputeDataType*>(p_shared) + a_block_space_size_aligned,
            b_block_desc_bk0_n_bk1.GetElementSpaceSize());

        // ... (block slice copy steps)

        // total K iterations = AK0 * AK1 / KPerBlock
        const index_t num_k_block_main_loop = __builtin_amdgcn_readfirstlane(
            (as_grid_desc_ak0_m_ak1[I0].GetLength(I0) * as_grid_desc_ak0_m_ak1[I0].GetLength(I2)) /
            KPerBlock);

        const auto gridwise_gemm_pipeline =
            GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>();

        gridwise_gemm_pipeline.template Run<HasMainKBlockLoop>(as_grid_desc_ak0_m_ak1,
                                                               a_block_desc_ak0_m_ak1,
                                                               a_blockwise_copy,
                                                               as_grid_buf,
                                                               a_block_buf,
                                                               a_block_slice_copy_step,
                                                               bs_grid_desc_bk0_n_bk1,
                                                               b_block_desc_bk0_n_bk1,
                                                               b_blockwise_copy,
                                                               bs_grid_buf,
                                                               b_block_buf,
                                                               b_block_slice_copy_step,
                                                               blockwise_gemm,
                                                               c_thread_buf,
                                                               num_k_block_main_loop);
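        // Example (hypothetical sizes, matching the earlier ones): AK0 = 512 and
        // AK1 = 8 give K = 4096; with KPerBlock = 32 the pipeline runs
        // num_k_block_main_loop = 4096 / 32 = 128 iterations. The value is
        // uniform across the wavefront, so readfirstlane pins it to an SGPR.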
        // shuffle C and write out
        {
            static_assert(MXdlPerWave % CShuffleMXdlPerWavePerShuffle == 0 &&
                              NXdlPerWave % CShuffleNXdlPerWavePerShuffle == 0,
                          "wrong!");

            constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
            constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

            // thread- and block-level views of the accumulator tile
            constexpr auto c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2 =
                blockwise_gemm.GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

            constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp =
                blockwise_gemm.GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

            constexpr auto M0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I0);
            constexpr auto N0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I1);
            constexpr auto M1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I2);
            constexpr auto N1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I3);
            constexpr auto M2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I4);
            constexpr auto M3 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I5);
            constexpr auto M4 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I6);
            constexpr auto N2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I7);

            constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
                GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock();

            // the C-shuffle staging buffer reuses the same LDS as the A/B tiles
            auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
                static_cast<CShuffleDataType*>(p_shared),
                c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());

            constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2 = transform_tensor_descriptor(
                c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
                // ...
                );
            // calculate the origin of this thread's accumulator slice on the block
            const auto c_thread_mtx_on_block =
                blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0, I0, I0);

            const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
            const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];

            const auto m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor =
                make_single_stage_tensor_adaptor(
                    make_tuple(make_merge_transform(make_tuple(M0, M1, M2, M3, M4))),
                    make_tuple(Sequence<0, 1, 2, 3, 4>{}),
                    make_tuple(Sequence<0>{}));

            const auto m_thread_data_on_block_idx =
                m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor.CalculateBottomIndex(
                    make_multi_index(m_thread_data_on_block));

            const auto n_thread_data_on_block_to_n0_n1_n2_adaptor =
                make_single_stage_tensor_adaptor(
                    make_tuple(make_merge_transform(make_tuple(N0, N1, N2))),
                    make_tuple(Sequence<0, 1, 2>{}),
                    make_tuple(Sequence<0>{}));

            const auto n_thread_data_on_block_idx =
                n_thread_data_on_block_to_n0_n1_n2_adaptor.CalculateBottomIndex(
                    make_multi_index(n_thread_data_on_block));

            // VGPR -> LDS copy of this thread's shuffle slice
            auto c_thread_copy_vgpr_to_lds = ThreadwiseTensorSliceTransfer_v1r3<
                // ...
                decltype(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                decltype(c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                // ...
                Sequence<CShuffleMXdlPerWavePerShuffle,
                         CShuffleNXdlPerWavePerShuffle,
                         // ...
                         >,
                // ...
                >{c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                  make_multi_index(0,
                                   0,
                                   m_thread_data_on_block_idx[I1],
                                   n_thread_data_on_block_idx[I1],
                                   m_thread_data_on_block_idx[I2],
                                   m_thread_data_on_block_idx[I3],
                                   m_thread_data_on_block_idx[I4],
                                   n_thread_data_on_block_idx[I2]),
                  // ...
                  };
            // tuples of references: C-shuffle LDS descriptor/buffer first, then Ds
            const auto c_ds_desc_refs = concat_tuple_of_reference(
                tie(c_shuffle_block_desc_mblock_mperblock_nblock_nperblock),
                generate_tie(
                    [&](auto i) -> const auto& {
                        return ds_grid_desc_mblock_mperblock_nblock_nperblock[i];
                    },
                    Number<NumDTensor>{}));

            const auto c_ds_buf_refs = concat_tuple_of_reference(
                tie(c_shuffle_block_buf),
                generate_tie(
                    [&](auto i) -> const auto& { return ds_grid_buf[i]; },
                    Number<NumDTensor>{}));
            // blockwise copy: (C shuffle in LDS) + Ds (global) -> E (global)
            auto cde_block_copy_lds_and_global = ThreadGroupTensorSliceTransfer_v7r2<
                // ...
                decltype(c_ds_desc_refs),
                decltype(tie(e_grid_desc_mblock_mperblock_nblock_nperblock)),
                CDEElementwiseOperation,
                // ...
                Sequence<1,
                         CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
                         1,
                         CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>,
                CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
                // ...
                CDEShuffleBlockTransferScalarPerVector_NPerBlock,
                CDEShuffleBlockTransferScalarPerVector_NPerBlock,
                // ...
                >{c_ds_desc_refs,
                  idx_c_ds_block_begin,
                  tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
                  // ...
                  };

            // space-filling curves that walk the accumulators one shuffle chunk
            // at a time, in matching order on the VGPR and LDS/global sides
            constexpr auto sfc_c_vgpr = SpaceFillingCurve<
                // ...
                Sequence<CShuffleMXdlPerWavePerShuffle,
                         CShuffleNXdlPerWavePerShuffle,
                         // ...
                         >>{};

            constexpr auto sfc_cde_block = SpaceFillingCurve<
                // ...
                Sequence<1,
                         CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
                         1,
                         CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>>{};

            constexpr index_t num_access = sfc_c_vgpr.GetNumOfAccess();

            static_assert(num_access == sfc_cde_block.GetNumOfAccess(), "wrong!");
            static_for<0, num_access, 1>{}([&](auto access_id) {
                // make sure it's safe to write to LDS
                block_sync_lds();

                // each thread writes its accumulator slice from VGPR to LDS
                c_thread_copy_vgpr_to_lds.Run(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                              sfc_c_vgpr.GetIndexTupleOfNumber(access_id),
                                              c_thread_buf,
                                              c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                              c_shuffle_block_buf);

                // make sure it's safe to read from LDS
                block_sync_lds();

                // fuse the Ds, apply cde_element_op, and store this chunk of E
                cde_block_copy_lds_and_global.Run(
                    c_ds_desc_refs,
                    c_ds_buf_refs,
                    tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
                    tie(e_grid_buf));

                if constexpr(access_id < num_access - 1)
                {
                    constexpr auto cde_lds_and_global_step =
                        sfc_cde_block.GetForwardStep(access_id);

                    // move the read windows on the D tensors
                    static_for<0, NumDTensor, 1>{}([&](auto i) {
                        cde_block_copy_lds_and_global.MoveSrcSliceWindow(
                            c_ds_desc_refs, i + I1, cde_lds_and_global_step);
                    });

                    // move the write window on E
                    cde_block_copy_lds_and_global.MoveDstSliceWindow(
                        tie(e_grid_desc_mblock_mperblock_nblock_nperblock),
                        I0,
                        cde_lds_and_global_step);
                }
            });
        }
    }
    template <bool HasMainKBlockLoop,
              GemmSpecialization GemmSpec,
              typename AsLayout,
              typename BsLayout,
              typename DsLayout,
              typename ELayout,
              typename Block2ETileMap>
    static __device__ void Run(AsGridPointer p_as_grid,
                               BsGridPointer p_bs_grid,
                               DsGridPointer p_ds_grid,
                               void* __restrict__ p_e_grid_,
                               void* __restrict__ p_shared,
                               const AElementwiseOperation& a_element_op,
                               const BElementwiseOperation& b_element_op,
                               const CDEElementwiseOperation& cde_element_op,
                               const index_t M,
                               const index_t N,
                               const index_t K,
#ifdef CK_CODE_GEN_RTC
                               // ...
#else
                               const std::array<index_t, NumATensor> StrideAs,
                               const std::array<index_t, NumBTensor> StrideBs,
                               const std::array<index_t, NumDTensor> StrideDs,
#endif
                               const index_t StrideE,
                               const Block2ETileMap& block_2_etile_map)
    {
        using AsGridDesc_M_K =
            remove_cvref_t<decltype(MakeAsGridDescriptor_M_K<AsLayout, GemmSpec>({}, {}, {}))>;
        using BsGridDesc_N_K =
            remove_cvref_t<decltype(MakeBsGridDescriptor_N_K<BsLayout, GemmSpec>({}, {}, {}))>;
        using DsGridDesc_M_N =
            remove_cvref_t<decltype(MakeDsGridDescriptor_M_N<DsLayout, GemmSpec>({}, {}, {}))>;

        const auto p_e_grid = reinterpret_cast<EDataType*>(p_e_grid_);

        AsGridDesc_M_K as_grid_desc_m_k;
        BsGridDesc_N_K bs_grid_desc_n_k;
        DsGridDesc_M_N ds_grid_desc_m_n;

        static_for<0, NumATensor, 1>{}([&](auto j) {
            using ALayout = remove_cvref_t<tuple_element_t<j.value, AsLayout>>;

            as_grid_desc_m_k(j) = MakeAGridDescriptor_M_K<ALayout, GemmSpec>(M, K, StrideAs[j]);
        });

        static_for<0, NumBTensor, 1>{}([&](auto j) {
            using BLayout = remove_cvref_t<tuple_element_t<j.value, BsLayout>>;

            bs_grid_desc_n_k(j) = MakeBGridDescriptor_N_K<BLayout, GemmSpec>(N, K, StrideBs[j]);
        });

        static_for<0, NumDTensor, 1>{}([&](auto j) {
            using DLayout = remove_cvref_t<tuple_element_t<j.value, DsLayout>>;

            ds_grid_desc_m_n(j) = MakeEGridDescriptor_M_N<DLayout, GemmSpec>(M, N, StrideDs[j]);
        });

        const auto e_grid_desc_m_n = MakeEGridDescriptor_M_N<ELayout, GemmSpec>(M, N, StrideE);

        const auto as_grid_desc_ak0_m_ak1 =
            MakeDefaultAsGridDescriptor_AK0_M_AK1(as_grid_desc_m_k);
        const auto bs_grid_desc_bk0_n_bk1 =
            MakeDefaultBsGridDescriptor_BK0_N_BK1(bs_grid_desc_n_k);

        const auto ds_grid_desc_mblock_mperblock_nblock_nperblock =
            MakeDsGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(ds_grid_desc_m_n);

        const auto e_grid_desc_mblock_mperblock_nblock_nperblock =
            MakeEGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(e_grid_desc_m_n);

        Run<HasMainKBlockLoop>(p_as_grid,
                               p_bs_grid,
                               p_ds_grid,
                               p_e_grid,
                               p_shared,
                               a_element_op,
                               b_element_op,
                               cde_element_op,
                               as_grid_desc_ak0_m_ak1,
                               bs_grid_desc_bk0_n_bk1,
                               ds_grid_desc_mblock_mperblock_nblock_nperblock,
                               e_grid_desc_mblock_mperblock_nblock_nperblock,
                               block_2_etile_map);
    }
};
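// Host-side usage sketch (hypothetical driver code; the device operation that
// wraps this struct is not part of this header). A typical flow is:
//   1. build as/bs/ds/e grid descriptors with the Make*GridDescriptor_* helpers,
//   2. reject unsupported shapes via CheckValidity(...),
//   3. choose the kernel instantiation with CalculateHasMainKBlockLoop(K),
//   4. launch a kernel whose body forwards its arguments to
//      GridwiseGemm::Run<HasMainKBlockLoop>(...), passing an LDS allocation of
//      GetSharedMemoryNumberOfByte() bytes.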