template <typename InOutElementFunc,
          typename... InOutDstrTensors,
          typename = std::enable_if_t<std::conjunction_v<
              std::negation<std::is_same<std::remove_const_t<InOutDstrTensors>, null_tensor>>...>>>
CK_TILE_DEVICE void tile_elementwise_inout(const InOutElementFunc& inout_element_func,
                                           InOutDstrTensors&... inout_dstr_tensors)
{
    // all distributed tensors are expected to have the same thread-buffer size,
    // so it is taken from the first pack element
    constexpr index_t thread_buffer_size =
        __type_pack_element<0, InOutDstrTensors...>::get_thread_buffer_size();

    static_for<0, thread_buffer_size, 1>{}(
        [&](auto i) { inout_element_func(inout_dstr_tensors.get_thread_buffer().at(i)...); });
}

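// Illustrative usage sketch (not part of this header): scale two distributed tensors in place.
// `a_tile` and `b_tile` are assumed static distributed tensors with matching thread-buffer
// sizes, created elsewhere.
//
//   tile_elementwise_inout(
//       [](auto& a, auto& b) {
//           a *= 2.f;
//           b *= 2.f;
//       },
//       a_tile,
//       b_tile);
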
template <typename InElementFunc,
          typename... InTensor,
          typename = std::enable_if_t<
              std::conjunction_v<std::negation<std::is_same<InTensor, null_tensor>>...>>>
CK_TILE_DEVICE auto tile_elementwise_in(const InElementFunc& in_element_func,
                                        const InTensor&... in_dstr_tensors)
{
    using OutDataType = decltype(in_element_func(typename InTensor::DataType{}...));

    // all inputs are expected to share the same tile distribution; take it from the first one
    constexpr auto in_tile_dstr = __type_pack_element<0, InTensor...>::get_tile_distribution();

    constexpr index_t thread_buffer_size =
        __type_pack_element<0, InTensor...>::get_thread_buffer_size();

    auto out_dstr_tensor = make_static_distributed_tensor<OutDataType>(in_tile_dstr);

    static_for<0, thread_buffer_size, 1>{}([&](auto i) {
        out_dstr_tensor.get_thread_buffer()(i) =
            in_element_func(in_dstr_tensors.get_thread_buffer()[i]...);
    });

    return out_dstr_tensor;
}

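// Illustrative usage sketch (not part of this header): build a new tensor from two inputs.
// `x_tile` and `y_tile` are assumed to share the same tile distribution; the output data type
// is deduced from the element function's return type.
//
//   auto sum_tile = tile_elementwise_in(
//       [](const auto& x, const auto& y) { return x + y; }, x_tile, y_tile);
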
/// @brief Template function that "unpacks" a tuple and applies an element-wise operation.
template <typename InElementFunc, typename Tuple, size_t... I>
CK_TILE_DEVICE auto tile_elementwise_inout_unpack(const InElementFunc& in_element_func,
                                                  const Tuple& t,
                                                  std::index_sequence<I...>)
{
    return tile_elementwise_inout(in_element_func, t.at(number<I>{})...);
}

// convenience overload: generates the index sequence from the tuple size
template <typename InElementFunc, typename Tuple>
CK_TILE_DEVICE auto tile_elementwise_inout_unpack(const InElementFunc& in_element_func,
                                                  const Tuple& t)
{
    static constexpr auto size = Tuple::size();
    return tile_elementwise_inout_unpack(in_element_func, t, std::make_index_sequence<size>{});
}

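// Illustrative usage sketch (not part of this header): apply an in/out element function to all
// tensors collected in a tuple. `tensor_tuple` is an assumed tuple of distributed tensors.
//
//   tile_elementwise_inout_unpack([](auto&... xs) { ((xs = 0), ...); }, tensor_tuple);
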
template <typename DstrTensors, typename T>
CK_TILE_DEVICE void set_tile(DstrTensors& dstr_tensor, const T& value)
{
    tile_elementwise_inout(
        [&value](auto& x) {
            x = type_convert<typename DstrTensors::DataType, remove_cvref_t<T>>(value);
        },
        dstr_tensor);
}

// setting a null_tensor is a no-op
template <typename T>
CK_TILE_DEVICE void set_tile(null_tensor&, const T&)
{
}

// compile-time value overload; for v == 0 the thread buffer can be cleared with
// dword-wide writes instead of per-element sub-dword writes
template <typename DstrTensors, index_t v, bool skip_subdword_opt = false>
CK_TILE_DEVICE void
set_tile(DstrTensors& dstr_tensor, number<v>, bool_constant<skip_subdword_opt> = {})
{
    using elem_type             = typename DstrTensors::DataType;
    constexpr index_t elem_size = sizeof(elem_type);

    constexpr index_t tensor_bytes = DstrTensors::get_thread_buffer_size() * elem_size;

    if constexpr(v == 0 && tensor_bytes % 4 == 0 && !skip_subdword_opt)
    {
#if CK_TILE_WORKAROUND_ROCM_6_1_SCRATCH_MEMORY_ISSUE
        auto& buffer = dstr_tensor.get_thread_buffer();

        static_for<0, tensor_bytes / 4, 1>{}([&](auto i_write) {
            if constexpr(elem_size == 1)
            {
                // four 1-byte elements per dword write
                constexpr auto values = ext_vector_t<elem_type, 4>{0, 0, 0, 0};

                buffer[i_write * 4 + 0] = values.x;
                buffer[i_write * 4 + 1] = values.y;
                buffer[i_write * 4 + 2] = values.z;
                buffer[i_write * 4 + 3] = values.w;
            }
            else if constexpr(elem_size == 2)
            {
                // two 2-byte elements per dword write
                constexpr auto values = ext_vector_t<elem_type, 2>{0, 0};

                buffer[i_write * 2 + 0] = values.x;
                buffer[i_write * 2 + 1] = values.y;
            }
            else if constexpr(elem_size == 4)
            {
                constexpr elem_type value = 0;

                buffer[i_write] = value;
            }
            else
            {
                static_assert(false, "type not supported");
            }
        });
#else
        using dvec_t = array<index_t, tensor_bytes / 4>;
        auto& tensor = reinterpret_cast<dvec_t&>(dstr_tensor.get_thread_buffer());
        for(auto i = 0; i < tensor.size(); i++)
            tensor.at(i) = v;
#endif
    }
    else
    {
        tile_elementwise_inout([](auto& x) { x = type_convert<elem_type, index_t>(v); },
                               dstr_tensor);
    }
}

template <typename DstrTensors>
CK_TILE_DEVICE void clear_tile(DstrTensors& dstr_tensor)
{
    set_tile(dstr_tensor, number<0>{});
}

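// Illustrative usage sketch (not part of this header): `acc_tile` is an assumed accumulator
// tensor. clear_tile zero-fills it; set_tile broadcasts a scalar (converted to the tensor's
// DataType) into every element.
//
//   clear_tile(acc_tile);
//   set_tile(acc_tile, 1.0f);
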
namespace impl {

template <typename OutDataType, typename InTensor>
CK_TILE_DEVICE auto cast_tile_pk_fp8_fp32(const InTensor& in_dstr_tensors)
{
#if defined(__gfx94__) || defined(__gfx12__)
    constexpr auto in_tile_dstr = InTensor::get_tile_distribution();

    constexpr index_t thread_buffer_size = InTensor::get_thread_buffer_size();
    static_assert(thread_buffer_size % 4 == 0);
    constexpr index_t thread_buffer_size_pk = thread_buffer_size / 4;

    auto out_dstr_tensor = make_static_distributed_tensor<OutDataType>(in_tile_dstr);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wuninitialized"
    // the builtin merges into an "old" dword; an intentionally uninitialized value avoids an
    // extra v_mov, hence the silenced warning
    uint32_t dummy_old;
    static_for<0, thread_buffer_size_pk, 1>{}([&](auto i) {
        uint32_t x = __builtin_amdgcn_cvt_pk_fp8_f32(
            in_dstr_tensors.get_thread_buffer()[number<4 * i.value + 0>{}],
            in_dstr_tensors.get_thread_buffer()[number<4 * i.value + 1>{}],
            dummy_old, false); // low word

        uint32_t y = __builtin_amdgcn_cvt_pk_fp8_f32(
            in_dstr_tensors.get_thread_buffer()[number<4 * i.value + 2>{}],
            in_dstr_tensors.get_thread_buffer()[number<4 * i.value + 3>{}],
            x, true); // high word

        using vec_t = array<OutDataType, 4>;
        vec_t d     = bit_cast<vec_t>(y);
        out_dstr_tensor.get_thread_buffer().template set_as<vec_t>(number<i>{}, d);
    });
#pragma clang diagnostic pop

    return out_dstr_tensor;
#else
    return tile_elementwise_in(type_convert<OutDataType, typename InTensor::DataType>,
                               in_dstr_tensors);
#endif
}

template <typename OutDataType, typename InTensor>
CK_TILE_DEVICE auto cast_tile_pkrtz_fp16_fp32(const InTensor& in_dstr_tensors)
{
#if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__)
    constexpr auto in_tile_dstr = InTensor::get_tile_distribution();

    constexpr index_t thread_buffer_size = InTensor::get_thread_buffer_size();
    static_assert(thread_buffer_size % 2 == 0);
    constexpr index_t thread_buffer_size_pk = thread_buffer_size / 2;

    auto out_dstr_tensor = make_static_distributed_tensor<OutDataType>(in_tile_dstr);

    for(index_t i = 0; i < thread_buffer_size_pk; i++)
    {
        // convert two fp32 values to fp16 with one packed round-toward-zero conversion
        auto o = __builtin_amdgcn_cvt_pkrtz(in_dstr_tensors.get_thread_buffer()[2 * i + 0],
                                            in_dstr_tensors.get_thread_buffer()[2 * i + 1]);

        out_dstr_tensor.get_thread_buffer().at(2 * i + 0) = o.x;
        out_dstr_tensor.get_thread_buffer().at(2 * i + 1) = o.y;
    }

    return out_dstr_tensor;
#else
    return tile_elementwise_in(type_convert<OutDataType, typename InTensor::DataType>,
                               in_dstr_tensors);
#endif
}

template <typename OutDataType, typename InTensor>
CK_TILE_DEVICE auto cast_tile_pk_fp16bf16_fp32(const InTensor& in_dstr_tensors)
{
    constexpr auto in_tile_dstr = InTensor::get_tile_distribution();

    constexpr index_t thread_buffer_size = InTensor::get_thread_buffer_size();
    static_assert(thread_buffer_size % 2 == 0);

    auto out_dstr_tensor = make_static_distributed_tensor<OutDataType>(in_tile_dstr);

    using f16x2_t =
        std::conditional_t<std::is_same_v<OutDataType, fp16_t>, fp16x2_t, bf16x2_t>;

    for(index_t i = 0; i < thread_buffer_size / 2; i++)
    {
        // convert a pair of fp32 values in one packed conversion
        auto o = type_convert<f16x2_t>(fp32x2_t{in_dstr_tensors.get_thread_buffer()[2 * i + 0],
                                                in_dstr_tensors.get_thread_buffer()[2 * i + 1]});

        out_dstr_tensor.get_thread_buffer().at(2 * i + 0) = o.x;
        out_dstr_tensor.get_thread_buffer().at(2 * i + 1) = o.y;
    }

    return out_dstr_tensor;
}

#if CK_TILE_USE_SUBDWORD_TILE_CAST
// assumes at least one of the source/destination element types is narrower than a dword;
// sub-dword elements are packed so each bulk store is a full dword (or wider)
template <typename OutDataType, typename InTensor>
CK_TILE_DEVICE auto cast_tile_opt_subdword(const InTensor& in_dstr_tensors)
{
    constexpr auto in_tile_dstr = InTensor::get_tile_distribution();

    auto out_dstr_tensor = make_static_distributed_tensor<OutDataType>(in_tile_dstr);

    using i_type = remove_cvref_t<typename InTensor::DataType>;
    using o_type = remove_cvref_t<OutDataType>;

    constexpr index_t i_elem_bytes = sizeof(i_type);
    constexpr index_t o_elem_bytes = sizeof(o_type);
    static_assert(i_elem_bytes < 4 || o_elem_bytes < 4);

    // number of elements handled by one bulk write
    constexpr index_t bulk_size =
        (i_elem_bytes >= o_elem_bytes) ? (4 / o_elem_bytes) : (4 / i_elem_bytes);
    static_assert(bulk_size != 0);

    using o_bulk_type = ext_vector_t<o_type, bulk_size>;

    constexpr index_t thread_buffer_size = InTensor::get_thread_buffer_size();

    constexpr index_t iters = thread_buffer_size / bulk_size;
    constexpr index_t rems  = thread_buffer_size % bulk_size;

    static_for<0, iters, 1>{}([&](auto i) {
        union bulk_wrapper
        {
            o_bulk_type bulk{};
            o_type data[bulk_size];
        } o_bulk;

        static_for<0, bulk_size, 1>{}([&o_bulk, &in_dstr_tensors, &i](auto ib) {
            o_bulk.data[ib.value] = static_cast<o_type>(
                in_dstr_tensors.get_thread_buffer()
                    .template get_as<i_type>()[number<bulk_size * i.value + ib.value>{}]);
        });

        out_dstr_tensor.get_thread_buffer().template set_as<o_bulk_type>(i, o_bulk.bulk);
    });

    // leftover elements are converted one by one
    static_for<0, rems, 1>{}([&](auto r) {
        auto idx = number<iters * bulk_size + r>{};
        out_dstr_tensor.get_thread_buffer().at(idx) =
            static_cast<o_type>(in_dstr_tensors.get_thread_buffer().at(idx));
    });

    return out_dstr_tensor;
}
#endif

} // namespace impl

template <typename DstType, typename SrcTensor>
CK_TILE_DEVICE auto cast_tile(const SrcTensor& src_tensor)
{
    if constexpr((std::is_same_v<DstType, fp8_t> || std::is_same_v<DstType, bf8_t>) &&
                 std::is_same_v<typename SrcTensor::DataType, float> &&
                 (SrcTensor::get_thread_buffer_size() % 4 == 0))
        return impl::cast_tile_pk_fp8_fp32<DstType, SrcTensor>(src_tensor);
#if CK_TILE_USE_PK_FP16_TILE_CAST
    else if constexpr(std::is_same_v<DstType, fp16_t> &&
                      std::is_same_v<typename SrcTensor::DataType, float> &&
                      (SrcTensor::get_thread_buffer_size() % 2 == 0))
        return impl::cast_tile_pkrtz_fp16_fp32<DstType, SrcTensor>(src_tensor);
#endif
    else if constexpr((std::is_same_v<DstType, fp16_t> || std::is_same_v<DstType, bf16_t>) &&
                      std::is_same_v<typename SrcTensor::DataType, float> &&
                      (SrcTensor::get_thread_buffer_size() % 2 == 0))
        return impl::cast_tile_pk_fp16bf16_fp32<DstType, SrcTensor>(src_tensor);
#if CK_TILE_USE_SUBDWORD_TILE_CAST
    else if constexpr(sizeof(DstType) < 4 || sizeof(typename SrcTensor::DataType) < 4)
        return impl::cast_tile_opt_subdword<DstType, SrcTensor>(src_tensor);
#endif
    else
        return tile_elementwise_in(type_convert<DstType, typename SrcTensor::DataType>,
                                   src_tensor);
}

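// Illustrative usage sketch (not part of this header): cast an fp32 accumulator tile to fp16
// before the epilogue store. `acc_tile` is an assumed fp32 distributed tensor; cast_tile
// dispatches to a packed conversion when the destination type and architecture allow it.
//
//   auto c_tile = cast_tile<fp16_t>(acc_tile);
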
// null_tensor overloads: no work is done
template <typename InOutElementFunc,
          typename... MaybeNullTensor,
          typename = std::enable_if_t<
              std::disjunction_v<std::is_same<remove_cvref_t<MaybeNullTensor>, null_tensor>...>>>
CK_TILE_DEVICE void tile_elementwise_inout(const InOutElementFunc&, MaybeNullTensor&&...)
{
}

template <typename InElementFunc,
          typename... MaybeNullTensor,
          typename = std::enable_if_t<
              std::disjunction_v<std::is_same<remove_cvref_t<MaybeNullTensor>, null_tensor>...>>>
CK_TILE_DEVICE auto tile_elementwise_in(const InElementFunc&, MaybeNullTensor&&...)
{
    return null_tensor{};
}