20 template <typename GridwiseGemm, bool HasMainKBlockLoop>
22 #if CK_USE_LAUNCH_BOUNDS
27 #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__) || \
29 __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
31 GridwiseGemm::template Run<HasMainKBlockLoop>(
32 karg.p_a_grid, karg.p_b_grid, karg.p_c_grid, p_shared, karg);
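A minimal host-side sketch of how this Argument-based kernel entry point might be launched. It is illustrative only, not taken from this header: "Gemm" stands for an assumed concrete GridwiseGemm instantiation, and BlockSize, stream, and the raw pointers/sizes are assumed to come from the caller. It relies only on members shown in this listing (Argument, CheckValidity, CalculateGridSize, CalculateHasMainKBlockLoop).

    // Hedged sketch: host-side launch (not part of this header).
    const auto karg = Gemm::Argument(p_a, p_b, p_c, M, N, K, StrideA, StrideB, StrideC);

    if(!Gemm::CheckValidity(karg)) // Argument derives from Problem
        throw std::runtime_error("unsupported GEMM problem");

    const index_t grid_size = Gemm::CalculateGridSize(M, N);

    if(Gemm::CalculateHasMainKBlockLoop(K))
        kernel_gemm_xdl_cshuffle_v1<Gemm, true><<<grid_size, BlockSize, 0, stream>>>(karg);
    else
        kernel_gemm_xdl_cshuffle_v1<Gemm, false><<<grid_size, BlockSize, 0, stream>>>(karg);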
38 template <typename GridwiseGemm,
42 bool HasMainKBlockLoop>
44 #if CK_USE_LAUNCH_BOUNDS
48 const FloatB* __restrict__ p_b_grid,
49 FloatC* __restrict__ p_c_grid,
50 typename GridwiseGemm::Problem problem)
52 #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx908__) || defined(__gfx90a__) || \
54 __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
56 GridwiseGemm::template Run<HasMainKBlockLoop>(p_a_grid, p_b_grid, p_c_grid, p_shared, problem);
65 template <typename ALayout,
70 typename FloatGemmAcc,
71 typename FloatCShuffle,
73 typename AElementwiseOperation,
74 typename BElementwiseOperation,
75 typename CElementwiseOperation,
89 typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
90 typename ABlockTransferThreadClusterArrangeOrder,
91 typename ABlockTransferSrcAccessOrder,
92 index_t ABlockTransferSrcVectorDim,
93 index_t ABlockTransferSrcScalarPerVector,
94 index_t ABlockTransferDstScalarPerVector_AK1,
95 bool AThreadTransferSrcResetCoordinateAfterRun,
97 typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
98 typename BBlockTransferThreadClusterArrangeOrder,
99 typename BBlockTransferSrcAccessOrder,
100 index_t BBlockTransferSrcVectorDim,
101 index_t BBlockTransferSrcScalarPerVector,
102 index_t BBlockTransferDstScalarPerVector_BK1,
103 bool BThreadTransferSrcResetCoordinateAfterRun,
105 index_t CShuffleMXdlPerWavePerShuffle,
106 index_t CShuffleNXdlPerWavePerShuffle,
107 typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
108 index_t CShuffleBlockTransferScalarPerVector_NPerBlock,
111 typename ComputeTypeA = FloatC,
112 typename ComputeTypeB = ComputeTypeA>
156 if constexpr(GemmSpec == GemmSpecialization::MKPadding ||
157 GemmSpec == GemmSpecialization::MNKPadding ||
158 GemmSpec == GemmSpecialization::KPadding ||
159 GemmSpec == GemmSpecialization::NKPadding)
173 if constexpr(GemmSpec == GemmSpecialization::NKPadding ||
174 GemmSpec == GemmSpecialization::MNKPadding ||
175 GemmSpec == GemmSpecialization::KPadding ||
176 GemmSpec == GemmSpecialization::MKPadding)
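The branches above decide whether K participates in padding when AK0/BK0 are computed. As a worked example of the padded-K arithmetic (a standalone sketch; KPerBlock, AK1Value, and K are illustrative numbers, and the helper mirrors CK's math::integer_least_multiple from the symbol index below rather than the elided branch body):

    #include <cstdint>
    using index_t = std::int32_t;

    // Round x up to the next multiple of y (mirrors math::integer_least_multiple).
    constexpr index_t integer_least_multiple(index_t x, index_t y) { return ((x + y - 1) / y) * y; }

    constexpr index_t KPerBlock = 32;   // assumed tile depth
    constexpr index_t AK1Value  = 8;    // assumed K1 vector length
    constexpr index_t K         = 1000;

    constexpr index_t KPadded = integer_least_multiple(K, KPerBlock); // 1024
    constexpr index_t AK0     = KPadded / AK1Value;                   // 128
    static_assert(KPadded == 1024 && AK0 == 128, "worked example");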
199 const auto a_grid_desc_mraw_kraw = [&]() {
200 if constexpr(is_same_v<tensor_layout::gemm::RowMajor, ALayout>)
204 else if constexpr(is_same_v<tensor_layout::gemm::ColumnMajor, ALayout>)
212 if constexpr(GemmSpec == GemmSpecialization::MKPadding ||
213 GemmSpec == GemmSpecialization::MNKPadding)
216 const auto a_grid_desc_m_k =
230 return a_grid_desc_ak0_m_ak1;
232 else if constexpr(GemmSpec == GemmSpecialization::MPadding ||
233 GemmSpec == GemmSpecialization::MNPadding)
237 a_grid_desc_mraw_kraw,
243 return a_grid_desc_ak0_m_ak1;
245 else if constexpr(GemmSpec == GemmSpecialization::KPadding ||
246 GemmSpec == GemmSpecialization::NKPadding)
250 a_grid_desc_mraw_kraw,
262 return a_grid_desc_ak0_m_ak1;
268 a_grid_desc_mraw_kraw,
274 return a_grid_desc_ak0_m_ak1;
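The elided branch bodies above all follow the same CK pattern: start from a_grid_desc_mraw_kraw, pad M and/or K as the specialization requires, then unmerge K into (AK0, AK1). Below is a hedged sketch of the K-padding branch, a plausible reconstruction rather than the verbatim body, using only helpers listed in the symbol index at the end of this listing (transform_tensor_descriptor, make_pass_through_transform, make_right_pad_transform, make_unmerge_transform):

    // Sketch of a K-padded A descriptor build (plausible reconstruction, not verbatim).
    const auto a_grid_desc_m_kpad = transform_tensor_descriptor(
        a_grid_desc_mraw_kraw,
        make_tuple(make_pass_through_transform(M), make_right_pad_transform(K, KPad - K)),
        make_tuple(Sequence<0>{}, Sequence<1>{}),
        make_tuple(Sequence<0>{}, Sequence<1>{}));

    const auto a_grid_desc_ak0_m_ak1 = transform_tensor_descriptor(
        a_grid_desc_m_kpad,
        make_tuple(make_unmerge_transform(make_tuple(AK0, AK1Number)),
                   make_pass_through_transform(M)),
        make_tuple(Sequence<1>{}, Sequence<0>{}),
        make_tuple(Sequence<0, 2>{}, Sequence<1>{}));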
281 const auto b_grid_desc_nraw_kraw = [&]() {
294 if constexpr(GemmSpec == GemmSpecialization::NKPadding ||
295 GemmSpec == GemmSpecialization::MNKPadding)
298 const auto b_grid_desc_n_k =
312 return b_grid_desc_bk0_n_bk1;
314 else if constexpr(GemmSpec == GemmSpecialization::NPadding ||
315 GemmSpec == GemmSpecialization::MNPadding)
319 b_grid_desc_nraw_kraw,
325 return b_grid_desc_bk0_n_bk1;
327 else if constexpr(GemmSpec == GemmSpecialization::KPadding ||
328 GemmSpec == GemmSpecialization::MKPadding)
332 b_grid_desc_nraw_kraw,
344 return b_grid_desc_bk0_n_bk1;
350 b_grid_desc_nraw_kraw,
356 return b_grid_desc_bk0_n_bk1;
360 __host__ __device__ static auto
363 const auto c_grid_desc_mraw_nraw = [&]() {
376 if constexpr(GemmSpec == GemmSpecialization::MNPadding ||
377 GemmSpec == GemmSpecialization::MNKPadding)
386 else if constexpr(GemmSpec == GemmSpecialization::MPadding ||
387 GemmSpec == GemmSpecialization::MKPadding)
391 c_grid_desc_mraw_nraw,
396 else if constexpr(GemmSpec == GemmSpecialization::NPadding ||
397 GemmSpec == GemmSpecialization::NKPadding)
401 c_grid_desc_mraw_nraw,
409 return c_grid_desc_mraw_nraw;
439 std::cout << "problem {"
449 << "AK0:" << AK0 << ", "
450 << "BK0:" << BK0 << ", "
451 << "MBlock: " << MBlock << ", "
452 << "NBlock: " << NBlock << "}" << std::endl;
474 const FloatB* p_b_grid_,
482 : Problem{M_, N_, K_, StrideA_, StrideB_, StrideC_},
496 decltype(GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage, LoopSched>())>;
516 constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
517 constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);
519 constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
526 return c_shuffle_block_desc_mblock_mperblock_nblock_nperblock;
539 a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);
542 b_block_desc_bk0_n_bk1.GetElementSpaceSize(), max_lds_align);
545 constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
548 constexpr auto c_block_size =
549 c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize();
551 return math::max((a_block_space_size_aligned * sizeof(ComputeTypeA) +
552 b_block_space_size_aligned * sizeof(ComputeTypeB)),
553 c_block_size * sizeof(FloatCShuffle));
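The return value above is a max() rather than a sum because the C-shuffle stage runs after the main loop and reuses the LDS that held the A and B tiles. A standalone worked example with assumed tile shapes (fp16 A/B tiles, fp32 shuffle tile; the alignment padding applied via max_lds_align in the lines above is ignored here for simplicity):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Assumed tile shapes (illustrative, not from this header).
    constexpr std::size_t a_elems = 4 * 256 * 8;      // AK0PerBlock * MPerBlock * AK1
    constexpr std::size_t b_elems = 4 * 128 * 8;      // BK0PerBlock * NPerBlock * BK1
    constexpr std::size_t c_elems = 1 * 32 * 1 * 128; // C-shuffle tile (MBlock=1, 32, NBlock=1, 128)

    constexpr std::size_t lds_bytes =
        std::max(a_elems * sizeof(std::uint16_t) + b_elems * sizeof(std::uint16_t), // fp16 A+B
                 c_elems * sizeof(float));                                          // fp32 shuffle
    static_assert(lds_bytes == 24576, "A+B tiles dominate for this configuration");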
559 static_assert((MPerBlock % (MPerXdl * MXdlPerWave) == 0) &&
560 (NPerBlock % (NXdlPerWave * NPerXdl)) == 0,
561 "Invalid tuning param!");
568 if(!(problem.M % MPerBlock == 0))
579 if(!(problem.N % NPerBlock == 0))
598 if(!(problem.K % AK1Value == 0) || !(problem.K % BK1Value == 0))
606 if(problem.K % ABlockTransferSrcScalarPerVector != 0)
613 if(problem.M % ABlockTransferSrcScalarPerVector != 0)
621 if(problem.N % BBlockTransferSrcScalarPerVector != 0)
628 if(problem.K % BBlockTransferSrcScalarPerVector != 0)
636 if(problem.N % CShuffleBlockTransferScalarPerVector_NPerBlock != 0)
643 if(problem.M % CShuffleBlockTransferScalarPerVector_NPerBlock != 0)
650 const auto num_k_loop = (CalculateAK0(problem.K) * AK1Value) / KPerBlock;
652 if(!GridwiseGemmPipe::IsSupported(num_k_loop))
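The scalar-per-vector checks above are the reason a tuning configuration must match the problem's contiguous dimension (which dimension is checked depends on the layout and the transfer's vector dim). A hypothetical host-side helper, not part of CK, that mirrors these checks by picking the widest vector access that still divides the contiguous length:

    // Hypothetical helper mirroring the divisibility checks above (not part of this header).
    index_t largest_valid_scalar_per_vector(index_t contiguous_len, index_t max_width = 8)
    {
        for(index_t w = max_width; w > 1; w /= 2)
            if(contiguous_len % w == 0)
                return w; // widest power-of-two vector that divides the length
        return 1;
    }
    // e.g. contiguous K = 1000 -> 8, K = 1002 -> 2, K = 999 -> 1.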
663 const index_t num_loop = K / KPerBlock;
665 return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
668 template <typename CGridDesc>
679 return c_grid_desc_mblock_mperblock_nblock_nperblock;
685 template <bool HasMainKBlockLoop>
686 __device__ static void Run(const FloatA* __restrict__ p_a_grid,
687 const FloatB* __restrict__ p_b_grid,
688 FloatC* __restrict__ p_c_grid,
689 void* __restrict__ p_shared,
699 const auto c_grid_desc_mblock_mperblock_nblock_nperblock =
703 const auto a_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
704 p_a_grid, a_grid_desc_ak0_m_ak1.GetElementSpaceSize());
705 const auto b_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
706 p_b_grid, b_grid_desc_bk0_n_bk1.GetElementSpaceSize());
707 auto c_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
708 p_c_grid, c_grid_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
710 const AElementwiseOperation a_element_op{};
711 const BElementwiseOperation b_element_op{};
712 const CElementwiseOperation c_element_op{};
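AElementwiseOperation, BElementwiseOperation, and CElementwiseOperation are default-constructed functors applied per element during the transfers. A minimal sketch of the functor shape these types are expected to have, modeled on CK's PassThrough from unary_element_wise_operation.hpp (referenced in the symbol index at the end of this listing); the exact definition there may differ:

    // Minimal sketch of an elementwise-operation functor (modeled on CK's PassThrough).
    struct PassThrough
    {
        template <typename Y, typename X>
        __host__ __device__ constexpr void operator()(Y& y, const X& x) const
        {
            y = x; // identity; real operations may convert, scale, or apply activations
        }
    };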
717 const auto block_work_idx =
720 if(!block_2_ctile_map.ValidCTileIndex(
722 make_tuple(c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I0),
723 c_grid_desc_mblock_mperblock_nblock_nperblock.GetLength(I2))))
729 const index_t m_block_data_idx_on_grid =
730 __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);
732 const index_t n_block_data_idx_on_grid =
733 __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);
745 auto a_blockwise_copy =
747 AElementwiseOperation,
751 ABlockTransferThreadClusterLengths_AK0_M_AK1,
752 ABlockTransferThreadClusterArrangeOrder,
755 decltype(a_grid_desc_ak0_m_ak1),
756 decltype(a_block_desc_ak0_m_ak1),
757 ABlockTransferSrcAccessOrder,
759 ABlockTransferSrcVectorDim,
761 ABlockTransferSrcScalarPerVector,
762 ABlockTransferDstScalarPerVector_AK1,
765 AThreadTransferSrcResetCoordinateAfterRun,
767 NumGemmKPrefetchStage>(
768 a_grid_desc_ak0_m_ak1,
771 a_block_desc_ak0_m_ak1,
776 auto b_blockwise_copy =
778 BElementwiseOperation,
782 BBlockTransferThreadClusterLengths_BK0_N_BK1,
783 BBlockTransferThreadClusterArrangeOrder,
786 decltype(b_grid_desc_bk0_n_bk1),
787 decltype(b_block_desc_bk0_n_bk1),
788 BBlockTransferSrcAccessOrder,
790 BBlockTransferSrcVectorDim,
792 BBlockTransferSrcScalarPerVector,
793 BBlockTransferDstScalarPerVector_BK1,
796 BThreadTransferSrcResetCoordinateAfterRun,
798 NumGemmKPrefetchStage>(
799 b_grid_desc_bk0_n_bk1,
802 b_block_desc_bk0_n_bk1,
822 decltype(a_block_desc_ak0_m_ak1),
823 decltype(b_block_desc_bk0_n_bk1),
831 auto c_thread_buf = blockwise_gemm.GetCThreadBuffer();
835 a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);
837 auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
838 static_cast<ComputeTypeA*>(p_shared), a_block_desc_ak0_m_ak1.GetElementSpaceSize());
840 auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
841 static_cast<ComputeTypeB*>(p_shared) + a_block_space_size_aligned,
842 b_block_desc_bk0_n_bk1.GetElementSpaceSize());
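The two make_dynamic_buffer calls above carve a single __shared__ allocation: the A tile starts at p_shared, the B tile starts a_block_space_size_aligned elements later, and (further down, at listing line 901) the C-shuffle tile reuses offset 0. A host-side analogue, using the illustrative sizes from the GetSharedMemoryNumberOfByte example earlier; offsets and types are assumptions:

    #include <cstdint>

    // Host-side analogue of the LDS carving above (illustrative sizes, fp16 tiles).
    void carve_example()
    {
        alignas(16) static char shared_buf[24576];                          // GetSharedMemoryNumberOfByte()
        auto* a_tile = reinterpret_cast<std::uint16_t*>(shared_buf);        // A tile at offset 0
        auto* b_tile = reinterpret_cast<std::uint16_t*>(shared_buf) + 8192; // + a_block_space_size_aligned
        auto* c_tile = reinterpret_cast<float*>(shared_buf);                // epilogue reuses offset 0
        (void)a_tile; (void)b_tile; (void)c_tile;
    }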
848 static_assert(std::is_default_constructible_v<GridwiseGemmPipe>);
851 const index_t num_k_block_main_loop = __builtin_amdgcn_readfirstlane(
852 (a_grid_desc_ak0_m_ak1.GetLength(I0) * a_grid_desc_ak0_m_ak1.GetLength(I2)) /
855 gridwise_gemm_pipeline.template Run<HasMainKBlockLoop>(a_grid_desc_ak0_m_ak1,
856 a_block_desc_ak0_m_ak1,
860 a_block_slice_copy_step,
861 b_grid_desc_bk0_n_bk1,
862 b_block_desc_bk0_n_bk1,
866 b_block_slice_copy_step,
869 num_k_block_main_loop);
873 static_assert(MXdlPerWave % CShuffleMXdlPerWavePerShuffle == 0 &&
874 NXdlPerWave % CShuffleNXdlPerWavePerShuffle == 0,
877 constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
878 constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);
881 constexpr auto c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2 =
882 blockwise_gemm.GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();
886 constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp =
887 blockwise_gemm.GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();
889 constexpr auto M0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I0);
890 constexpr auto N0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I1);
891 constexpr auto M1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I2);
892 constexpr auto N1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I3);
893 constexpr auto M2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I4);
894 constexpr auto M3 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I5);
895 constexpr auto M4 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I6);
896 constexpr auto N2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I7);
898 constexpr auto c_shuffle_block_desc_mblock_mperblock_nblock_nperblock =
901 auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
902 static_cast<FloatCShuffle*>(p_shared),
903 c_shuffle_block_desc_mblock_mperblock_nblock_nperblock.GetElementSpaceSize());
906 c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
926 const auto c_thread_mtx_on_block =
927 blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0, I0, I0);
929 const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
930 const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];
932 const auto m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor =
938 const auto m_thread_data_on_block_idx =
939 m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor.CalculateBottomIndex(
942 const auto n_thread_data_on_block_to_n0_n1_n2_adaptor =
948 const auto n_thread_data_on_block_idx =
949 n_thread_data_on_block_to_n0_n1_n2_adaptor.CalculateBottomIndex(
953 auto c_thread_copy_vgpr_to_lds =
956 decltype(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2),
957 decltype(c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2),
959 Sequence<CShuffleMXdlPerWavePerShuffle,
960 CShuffleNXdlPerWavePerShuffle,
973 c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
976 m_thread_data_on_block_idx[I1],
977 n_thread_data_on_block_idx[I1],
978 m_thread_data_on_block_idx[I2],
979 m_thread_data_on_block_idx[I3],
980 m_thread_data_on_block_idx[I4],
981 n_thread_data_on_block_idx[I2]),
987 CElementwiseOperation,
988 CGlobalMemoryDataOperation,
990 CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
992 CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>,
993 CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
997 decltype(c_shuffle_block_desc_mblock_mperblock_nblock_nperblock),
998 decltype(c_grid_desc_mblock_mperblock_nblock_nperblock),
1001 CShuffleBlockTransferScalarPerVector_NPerBlock,
1004 {c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
1006 c_grid_desc_mblock_mperblock_nblock_nperblock,
1011 constexpr auto sfc_c_vgpr =
1014 Sequence<CShuffleMXdlPerWavePerShuffle,
1015 CShuffleNXdlPerWavePerShuffle,
1024 constexpr auto sfc_c_global =
1028 CShuffleMXdlPerWavePerShuffle * MWave * MPerXdl,
1030 CShuffleNXdlPerWavePerShuffle * NWave * NPerXdl>>{};
1032 constexpr index_t num_access = sfc_c_vgpr.GetNumOfAccess();
1034 static_assert(num_access == sfc_c_global.GetNumOfAccess(), "wrong!");
1041 c_thread_copy_vgpr_to_lds.Run(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2,
1042 sfc_c_vgpr.GetIndexTupleOfNumber(access_id),
1044 c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
1045 c_shuffle_block_buf);
1051 c_shuffle_block_copy_lds_to_global.Run(
1052 c_shuffle_block_desc_mblock_mperblock_nblock_nperblock,
1053 c_shuffle_block_buf,
1054 c_grid_desc_mblock_mperblock_nblock_nperblock,
1057 if constexpr(access_id < num_access - 1)
1059 constexpr auto c_global_step = sfc_c_global.GetForwardStep(access_id);
1062 c_shuffle_block_copy_lds_to_global.MoveDstSliceWindow(
1063 c_grid_desc_mblock_mperblock_nblock_nperblock, c_global_step);
#define CK_MIN_BLOCK_PER_CU
Definition: ck.hpp:34
#define CK_MAX_THREAD_PER_BLOCK
Definition: ck.hpp:33
__host__ constexpr __device__ auto lcm(X x, Y y)
Definition: math.hpp:198
__host__ constexpr __device__ auto integer_least_multiple(X x, Y y)
Definition: math.hpp:78
__host__ constexpr __device__ auto integer_divide_ceil(X x, Y y)
Definition: math.hpp:72
__host__ constexpr __device__ auto integer_divide_floor(X x, Y y)
Definition: math.hpp:66
__host__ constexpr __device__ T max(T x)
Definition: math.hpp:84
GemmSpecialization
Definition: gemm_specialization.hpp:11
__host__ constexpr __device__ auto make_multi_index(Xs &&... xs)
Definition: array_multi_index.hpp:15
constexpr auto BlockwiseGemmXdlops_k0mk1_k0nk1_m0n0m1n1m2m3m4n2_Selector()
Definition: blockwise_gemm_xdlops.hpp:606
__host__ constexpr __device__ auto make_naive_tensor_descriptor(const Tuple< Lengths... > &lengths, const Tuple< Strides... > &strides)
Definition: tensor_descriptor_helper.hpp:49
InMemoryDataOperationEnum
Definition: ck.hpp:267
__global__ void kernel_gemm_xdl_cshuffle_v1(typename GridwiseGemm::Argument karg)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:25
__host__ constexpr __device__ auto make_naive_tensor_descriptor_packed(const Tuple< Lengths... > &lengths)
Definition: tensor_descriptor_helper.hpp:101
__host__ constexpr __device__ auto make_merge_transform(const LowLengths &low_lengths)
Definition: multi_index_transform_helper.hpp:55
__host__ constexpr __device__ auto make_single_stage_tensor_adaptor(const Transforms &transforms, LowerDimensionOldTopIdss, UpperDimensionNewTopIdss)
Definition: tensor_adaptor.hpp:429
__host__ constexpr __device__ auto make_freeze_transform(const LowerIndex &low_idx)
Definition: multi_index_transform_helper.hpp:98
constexpr detail::ignore_t ignore
Definition: ignore.hpp:20
__device__ index_t get_block_1d_id()
Definition: get_id.hpp:22
__host__ constexpr __device__ auto make_pass_through_transform(const LowLength &low_length)
Definition: multi_index_transform_helper.hpp:12
__host__ constexpr __device__ auto make_tuple(Xs &&... xs)
Definition: tuple.hpp:211
remove_cv_t< remove_reference_t< T > > remove_cvref_t
Definition: type.hpp:300
__host__ constexpr __device__ auto make_unmerge_transform(const UpLengths &up_lengths, integral_constant< bool, Use24BitIntegerCalculation >=integral_constant< bool, false >{})
Definition: multi_index_transform_helper.hpp:90
LoopScheduler
Definition: loop_scheduler.hpp:15
int32_t index_t
Definition: ck.hpp:289
__host__ constexpr __device__ auto transform_tensor_descriptor(const OldTensorDescriptor &old_tensor_desc, const NewTransforms &new_transforms, NewLowerDimensionOldVisibleIdss, NewUpperDimensionNewVisibleIdss)
Definition: tensor_descriptor.hpp:319
__host__ constexpr __device__ auto make_right_pad_transform(const LowLength &low_length, const RightPadLength &right_pad, integral_constant< bool, SkipIsValidCheck >=integral_constant< bool, false >{})
Definition: multi_index_transform_helper.hpp:37
__device__ void block_sync_lds()
Definition: synchronization.hpp:10
PipelineVersion
Definition: gridwise_gemm_pipeline_selector.hpp:17
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:472
__host__ Argument(const FloatA *p_a_grid_, const FloatB *p_b_grid_, FloatC *p_c_grid_, index_t M_, index_t N_, index_t K_, index_t StrideA_, index_t StrideB_, index_t StrideC_)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:473
const FloatB * p_b_grid
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:490
const FloatA * p_a_grid
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:489
FloatC * p_c_grid
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:491
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:414
index_t NPadded
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:462
index_t N
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:456
index_t StrideA
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:458
index_t StrideB
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:459
index_t NBlock
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:467
index_t BK0
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:465
index_t KPadded
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:463
index_t MPadded
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:461
index_t K
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:457
index_t MBlock
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:466
__host__ void Print() const
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:437
__host__ Problem(index_t M_, index_t N_, index_t K_, index_t StrideA_, index_t StrideB_, index_t StrideC_)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:415
index_t M
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:455
index_t AK0
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:464
index_t StrideC
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:460
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:114
static constexpr __device__ auto GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1()
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:506
static constexpr __host__ bool CheckValidity(const Problem &problem)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:557
static constexpr auto BK1Number
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:128
static __host__ auto CalculateNPadded(index_t N)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:142
static __host__ auto CalculateAK0(index_t K)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:152
static __host__ auto CalculateMPadded(index_t M)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:137
static constexpr auto I6
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:121
static constexpr auto I1
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:116
static constexpr auto I2
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:117
static constexpr __host__ bool CalculateHasMainKBlockLoop(index_t K)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:661
static constexpr auto I5
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:120
__host__ static __device__ auto MakeCGridDescriptor_M_N(index_t M, index_t MPad, index_t N, index_t NPad, index_t StrideC)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:361
ThisThreadBlock< BlockSize > ThisThreadBlock
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:130
static __host__ auto CalculateBK0(index_t K)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:169
static __device__ void Run(const FloatA *__restrict__ p_a_grid, const FloatB *__restrict__ p_b_grid, FloatC *__restrict__ p_c_grid, void *__restrict__ p_shared, const Problem &problem)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:686
static __host__ auto CalculateNBlock(index_t N)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:191
static __device__ auto MakeBGridDescriptor_BK0_N_BK1(index_t K, index_t KPad, index_t N, index_t NPad, index_t StrideB, index_t BK0)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:278
static constexpr auto BK0Number
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:126
static __device__ auto MakeAGridDescriptor_AK0_M_AK1(index_t M, index_t MPad, index_t K, index_t KPad, index_t StrideA, index_t AK0)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:196
static constexpr auto I7
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:122
static constexpr auto I3
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:118
static __host__ auto CalculateKPadded(index_t K)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:147
static constexpr auto AK1Number
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:127
static __host__ auto CalculateMBlock(index_t M)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:186
static constexpr auto AK0Number
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:125
static constexpr __device__ index_t GetSharedMemoryNumberOfByte()
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:529
static constexpr auto I0
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:115
static constexpr __device__ auto GetCShuffleBlockDescriptor_MBlock_MPerBlock_NBlock_NPerBlock()
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:514
static constexpr auto I4
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:119
static constexpr __device__ auto GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1()
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:498
remove_cvref_t< decltype(GridwiseGemmPipeline_Selector< PipelineVer, NumGemmKPrefetchStage, LoopSched >())> GridwiseGemmPipe
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:496
static constexpr __device__ auto MakeCGridDescriptor_MBlock_MPerBlock_NBlock_NPerBlock(const CGridDesc &c_grid_desc_m_n, index_t MBlock, index_t NBlock)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:669
static __host__ auto CalculateGridSize(index_t M, index_t N)
Definition: gridwise_gemm_xdl_cshuffle_v1.hpp:132
Definition: xdlops_gemm.hpp:886
Definition: sequence.hpp:43
Definition: tensor_space_filling_curve.hpp:20
Blockwise data transfer.
Definition: thread_group_tensor_slice_transfer_v4r1.hpp:46
Definition: thread_group_tensor_slice_transfer_v6r1.hpp:34
Definition: threadwise_tensor_slice_transfer.hpp:39
Definition: integral_constant.hpp:10
Definition: functional2.hpp:31
Definition: device_base.hpp:50
Definition: unary_element_wise_operation.hpp:241