Composable Kernel: include/ck/ck.hpp Source File

ck.hpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/config.h"

#if !defined(__HIPCC_RTC__) || !defined(CK_CODE_GEN_RTC)
#ifndef CK_DONT_USE_HIP_RUNTIME_HEADERS
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#endif
#endif
// TODO: add various levels of logging with CK_LOG_LEVEL

#ifndef CK_TIME_KERNEL
#define CK_TIME_KERNEL 1
#endif

// constant address space for kernel parameters
// https://llvm.org/docs/AMDGPUUsage.html#address-spaces
#define CK_CONSTANT_ADDRESS_SPACE __attribute__((address_space(4)))
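// Illustrative usage sketch (not part of the original header): the attribute above places the
// pointee in the AMDGPU constant address space used for kernel arguments. A hypothetical
// kernel-argument pointer could be declared as:
//
//   __global__ void example_kernel(const ExampleKargs CK_CONSTANT_ADDRESS_SPACE* p_kargs);
//
// where `example_kernel` and `ExampleKargs` are placeholder names.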
// launch bounds
#define CK_USE_LAUNCH_BOUNDS 1

#ifdef CK_USE_LAUNCH_BOUNDS
// for most kernels
#define CK_MAX_THREAD_PER_BLOCK 256
#define CK_MIN_BLOCK_PER_CU 2

// for wavelet GEMM kernel
#define CK_WAVELET_MAX_THREAD_PER_BLOCK 512
#define CK_WAVELET_MIN_BLOCK_PER_CU 2
#endif
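// Illustrative usage sketch (not part of the original header): when CK_USE_LAUNCH_BOUNDS is
// enabled, the limits above are passed to HIP's __launch_bounds__, e.g. on a hypothetical kernel:
//
//   __global__ void __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
//   example_kernel(float* p_out)
//   {
//       /* ... */
//   }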

// kernel attribute: amdgpu_waves_per_eu()
#ifdef CK_USE_WAVES_PER_EU
// for 1-wave kernels, control arguments of amdgpu_waves_per_eu() attribute
#ifndef CK_MIN_WAVES_PER_EU
#define CK_MIN_WAVES_PER_EU 0
#endif

#ifndef CK_MAX_WAVES_PER_EU
#define CK_MAX_WAVES_PER_EU 0
#endif

#else
#define CK_USE_WAVES_PER_EU 0
#endif
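// Illustrative usage sketch (not part of the original header): when CK_USE_WAVES_PER_EU is
// enabled, the two bounds above feed clang's amdgpu_waves_per_eu attribute, e.g. on a
// hypothetical 1-wave kernel:
//
//   __global__ __attribute__((amdgpu_waves_per_eu(CK_MIN_WAVES_PER_EU, CK_MAX_WAVES_PER_EU)))
//   void example_one_wave_kernel(float* p_out)
//   {
//       /* ... */
//   }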

// define general macros for various architectures
#if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__) || defined(__gfx950__)
#define __gfx9__
#endif
#if defined(__gfx942__) || defined(__gfx950__)
#define __gfx94__
#endif
#if defined(__gfx1010__) || defined(__gfx1011__) || defined(__gfx1012__)
#define __gfx101__
#endif
#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || \
    defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || \
    defined(__gfx10_3_generic__)
#define __gfx103__
#endif
#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || \
    defined(__gfx1103__) || defined(__gfx1150__) || defined(__gfx1151__) || \
    defined(__gfx1152__) || defined(__gfx11_generic__)
#define __gfx11__
#endif
#if defined(__gfx1200__) || defined(__gfx1201__) || defined(__gfx12_generic__)
#define __gfx12__
#endif
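// Illustrative usage sketch (not part of the original header): the family macros above let
// architecture-specific paths be guarded in one place instead of listing every gfx target, e.g.
//
//   #if defined(__gfx9__)
//   /* code path that may use MFMA instructions */
//   #endif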

// buffer resource
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_BUFFER_RESOURCE_3RD_DWORD -1
#elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || defined(__gfx9__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000
#elif defined(__gfx103__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000
#elif defined(__gfx11__) || defined(__gfx12__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000
#endif
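// Illustrative sketch (not part of the original header): CK_BUFFER_RESOURCE_3RD_DWORD is the
// architecture-dependent config word (dword index 3) of the 128-bit buffer resource descriptor
// consumed by AMD buffer_load/buffer_store addressing. A hypothetical helper assembling such a
// descriptor might look like:
//
//   __device__ int4 example_make_buffer_resource(const void* ptr, uint32_t size_bytes)
//   {
//       int4 res;
//       /* dwords 0-1: 64-bit base address, dword 2: range in bytes (filled from ptr/size_bytes) */
//       res.w = CK_BUFFER_RESOURCE_3RD_DWORD; // dword 3: format/config bits
//       return res;
//   }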

// FMA instruction
#ifndef __HIP_DEVICE_COMPILE__ // for host code, define nothing
#elif defined(__gfx803__) || defined(__gfx900__) // for GPU code
#define CK_USE_AMD_V_MAC_F32
#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) // for GPU code
#define CK_USE_AMD_V_FMAC_F32
#define CK_USE_AMD_V_DOT2_F32_F16
#define CK_USE_AMD_V_DOT4_I32_I8
#elif defined(__gfx11__) || defined(__gfx12__)
#define CK_USE_AMD_V_FMAC_F32
#define CK_USE_AMD_V_DOT2_F32_F16
#define CK_USE_AMD_V_DOT4_I32_I8_GFX11
#endif

// MFMA instruction
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_USE_AMD_MFMA
#elif defined(__gfx9__) // for GPU code
#define CK_USE_AMD_MFMA
#endif

#if(defined(__gfx90a__) || defined(__gfx94__))
#define CK_USE_AMD_MFMA_BF16_1K_OP
#endif

#if defined(__gfx94__)
#define CK_USE_AMD_MFMA_GFX940
#endif

// buffer load
#define CK_USE_AMD_BUFFER_LOAD 1

// buffer store
#define CK_USE_AMD_BUFFER_STORE 1

// buffer atomic add: integer
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_INTEGER 1

// buffer atomic add: floating point
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
#elif defined(__gfx9__) || defined(__gfx12__) // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
#else // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 0
#endif

#if(defined(__gfx90a__) || defined(__gfx94__)) // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 1
#else
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 0
#endif

// inline asm
#define CK_USE_AMD_INLINE_ASM 1

// inner product (V_MAC/V_FMAC)
#define CK_USE_AMD_V_MAC_INLINE_ASM 1

// V_DOT inline instructions, less efficient since they require adding
// `s_nop`s to avoid hazards
#define CK_USE_AMD_V_DOT_INLINE_ASM 0

// inner product using V_DOT with DPP8 modifiers
#define CK_USE_AMD_V_DOT_DPP8_INLINE_ASM 1

// LDS direct loads using inline assembly
#define CK_USE_AMD_LDS_DIRECT_LOAD_INLINE_ASM 0

// use rounding to nearest even as default for bf16 conversions
#define CK_USE_RNE_BF16_CONVERSION 1

// use stochastic rounding for f8 conversions (0 = round to nearest even by default)
#define CK_USE_SR_F8_CONVERSION 0

// use stochastic rounding for f6 conversions (0 = round to nearest even by default)
#define CK_USE_SR_F6_CONVERSION 0

// use stochastic rounding for f4 conversions (0 = round to nearest even by default)
#define CK_USE_SR_F4_CONVERSION 0

// shuffle pk_i4 values during conversion to optimize the number of binary
// operations
#define CK_USE_PK4_LAYOUT_SHUFFLE 1

// block synchronization: only s_waitcnt lgkmcnt(0), not vmcnt(0)
#define CK_EXPERIMENTAL_BLOCK_SYNC_LDS_WITHOUT_SYNC_VMEM 1

// experimental feature: multi index implemented as array
#define CK_EXPERIMENTAL_USE_DYNAMICALLY_INDEXED_MULTI_INDEX 0

// experimental feature: static tensor descriptor
#define CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR 0

// experimental feature: buffer load/store/atomic-add OOB trick
// This (ifndef) is a hack to use customized behavior for buffer load rather than the default
// setting. Don't use this hack unless absolutely necessary!
// FIXME: make the behavior of buffer load a configurable (template) parameter for each usage
#ifndef CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK
#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 0
#endif
#define CK_EXPERIMENTAL_USE_BUFFER_STORE_OOB_CHECK_OFFSET_TRICK 1
#define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_ADD_OOB_CHECK_OFFSET_TRICK 1
#define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_MAX_OOB_CHECK_OFFSET_TRICK 1

// experimental feature: in-register sub-dword transpose
#define CK_EXPERIMENTAL_USE_IN_REGISTER_SUB_DWORD_TRANSPOSE 1

// experimental feature: merge transformation uses magic number division
#define CK_EXPERIMENTAL_MERGE_USE_MAGIC_DIVISION 1

// experimental feature: use __builtin_memcpy instead of pointer cast to access a vector from a
// pointer to scalar
#define CK_EXPERIMENTAL_USE_MEMCPY_FOR_VECTOR_ACCESS 0

// experimental feature: use __builtin_memcpy instead of union to do bit_cast
#define CK_EXPERIMENTAL_USE_MEMCPY_FOR_BIT_CAST 1
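// Illustrative sketch (not part of the original header): the __builtin_memcpy based bit_cast
// pattern this flag selects, written as a generic stand-alone helper (`bit_cast_example` is a
// placeholder name, not CK's actual API):
//
//   template <typename Y, typename X>
//   __host__ __device__ inline Y bit_cast_example(const X& x)
//   {
//       static_assert(sizeof(X) == sizeof(Y), "source and destination must be the same size");
//       Y y;
//       __builtin_memcpy(&y, &x, sizeof(X));
//       return y;
//   }
//
// The alternative (flag set to 0) is a union-based type pun; the memcpy form is usually optimized
// away and avoids relying on union aliasing behavior.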

// experimental feature: optimize for inter-wave scheduling policy
#define CK_EXPERIMENTAL_INTER_WAVE_SCHEDULING 1
#define CK_EXPERIMENTAL_INTER_WAVE_SCHEDULING_MAC_CLUSTERS 1
// this makes make_default_loop_scheduler() return the interwave scheduling flag by default
#define CK_EXPERIMENTAL_DEFAULT_TO_INTER_WAVE_SCHEDULING 0
// experimental feature: add instances using interwave scheduling
#define CK_EXPERIMENTAL_INTER_WAVE_INSTANCES 1
// experimental feature: add instances using pipeline v2
#define CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES 1
// experimental feature: optimize pipeline v2 by IGLP strategy (value = ID of strategy)
#ifndef CK_EXPERIMENTAL_PIPELINE_V2_IGLP_OPT
#define CK_EXPERIMENTAL_PIPELINE_V2_IGLP_OPT 0
#endif

// hack: has underlying assumptions that need to be satisfied, otherwise it's a bug
// hack for forcing register to keep idx_diff_low_const in SGPR. idx_diff_low_const must be
// thread-invariant, otherwise it's a bug
// TODO: separate index calculation into "compile-time", "global", "block", "wave", "thread"
#define CK_HACK_MERGE_CALCULATE_IDX_DIFF_LOW_CONST_USE_AMD_GCN_READ_FIRST_LANE 0

// workaround: conv crash when K and C are even
#define CK_WORKAROUND_DISABLE_FILTER1x1STRIDE1PAD0_WHEN_K_C_IS_EVEN 1

// workaround: compiler crash when compiling recursive lambda
#define CK_WORKAROUND_SWDEV_275126 1

// workaround: compiler crash when using buffer load/store for i8
#define CK_WORKAROUND_SWDEV_XXXXXX_INT8_BUFFER_LOAD_STORE_ISSUE 1

// workaround: compiler generating inefficient ds_write instructions
#define CK_WORKAROUND_SWDEV_XXXXXX_INT8_DS_WRITE_ISSUE 1

// workaround: verification failure, due to compiler regression, for conv bwd-data fp16 using some
// tuning parameters
#define CK_WORKAROUND_SWDEV_325164 0

// workaround: compiler not emitting reciprocal instruction from __frcp_rn()
#define CK_WORKAROUND_SWDEV_383542 1

// workaround: compiler issue on gfx908
#define CK_WORKAROUND_SWDEV_388832 1

// workaround: compiler issue on gfx950
#define CK_WORKAROUND_FP16_TO_FP8_CONVERSION 1

// workaround: compiler issue on gfx950
#define CK_WORKAROUND_BF16_TO_FP8_CONVERSION 1

// denorm test fix, necessary for gfx90a
#ifndef CK_GFX90A_DENORM_WORKAROUND
#define CK_GFX90A_DENORM_WORKAROUND 0
#endif // CK_GFX90A_DENORM_WORKAROUND
// Enable only for gfx90a
#if defined(__gfx90a__)
#if CK_GFX90A_DENORM_WORKAROUND
#define CK_GFX90A_DENORM_WORKAROUND 1
#endif // CK_GFX90A_DENORM_WORKAROUND is set to 1
#else
#define CK_GFX90A_DENORM_WORKAROUND 0
#endif // gfx90a

// set flag to 1 to build deprecated instances
#define CK_BUILD_DEPRECATED 1

namespace ck {

#if defined(__GFX9__) || !defined(__HIP_DEVICE_COMPILE__)
__device__ static constexpr int WarpSize = 64;
#else
__device__ static constexpr int WarpSize = 32;
#endif
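// Illustrative usage sketch (not part of the original header): WarpSize is the compile-time
// wavefront size (64 when compiling for gfx9 or for the host, 32 otherwise) and is typically
// used to derive wave/lane indices, e.g. in hypothetical helpers:
//
//   __device__ inline int example_get_lane_id() { return threadIdx.x % WarpSize; }
//   __device__ inline int example_get_wave_id() { return threadIdx.x / WarpSize; }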

enum struct InMemoryDataOperationEnum
{
    Set,
    AtomicAdd,
    AtomicMax,
    Add
};

// FIXME: use regular Sequence and remove this
template <InMemoryDataOperationEnum... Is>
struct InMemoryDataOperationEnumSequence
{
    static constexpr int mSize = sizeof...(Is);

    __host__ __device__ static constexpr InMemoryDataOperationEnum At(int I)
    {
        // the last dummy element is to prevent the compiler complaining about an empty array,
        // when mSize = 0
        const InMemoryDataOperationEnum mData[mSize + 1] = {Is..., InMemoryDataOperationEnum::Set};
        return mData[I];
    }
};
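// Illustrative usage sketch (not part of the original header): the sequence is queried at
// compile time via At(), e.g. with a hypothetical alias:
//
//   using ExampleMemOps = InMemoryDataOperationEnumSequence<InMemoryDataOperationEnum::Set,
//                                                           InMemoryDataOperationEnum::AtomicAdd>;
//   static_assert(ExampleMemOps::At(1) == InMemoryDataOperationEnum::AtomicAdd, "");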

// index type
using index_t      = int32_t;
using long_index_t = int64_t;

} // namespace ck