Composable Kernel: include/ck/tensor_operation/gpu/device/device_batchnorm_infer.hpp Source File

device_batchnorm_infer.hpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <array>
#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_base.hpp"

namespace ck {
namespace tensor_operation {
namespace device {

template <typename XDataType,
          typename YDataType,
          typename AccDataType,
          typename ScaleDataType,
          typename BiasDataType,
          typename MeanVarDataType,
          typename YElementwiseOp,
          index_t Rank,
          index_t NumBatchNormReduceDim>
struct DeviceBatchNormInfer : public BaseOperator
{
    virtual std::unique_ptr<BaseArgument> MakeArgumentPointer(
        const std::array<index_t, Rank> xyLengths,
        const std::array<index_t, Rank> xStrides,
        const std::array<index_t, Rank> yStrides,
        const std::array<int, NumBatchNormReduceDim> reduceDims,
        const std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleBiasMeanVarLengths,
        const std::array<index_t, Rank - NumBatchNormReduceDim> bnScaleStrides,
        const std::array<index_t, Rank - NumBatchNormReduceDim> bnBiasStrides,
        const std::array<index_t, Rank - NumBatchNormReduceDim> bnMeanVarStrides,
        const void* p_x,
        const void* bnScale,
        const void* bnBias,
        double epsilon,
        const YElementwiseOp y_elementwise_op,
        const void* estimatedMean,
        const void* estimatedInvVariance,
        void* p_y) = 0;

    virtual std::unique_ptr<BaseInvoker> MakeInvokerPointer() = 0;
};

template <typename XDataType,
          typename YDataType,
          typename AccDataType,
          typename ScaleDataType,
          typename BiasDataType,
          typename MeanVarDataType,
          typename YElementwiseOp,
          index_t Rank,
          index_t NumBatchNormReduceDim>
using DeviceBatchNormInferPtr = std::unique_ptr<DeviceBatchNormInfer<XDataType,
                                                                     YDataType,
                                                                     AccDataType,
                                                                     ScaleDataType,
                                                                     BiasDataType,
                                                                     MeanVarDataType,
                                                                     YElementwiseOp,
                                                                     Rank,
                                                                     NumBatchNormReduceDim>>;

} // namespace device
} // namespace tensor_operation
} // namespace ck
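
The header above declares only the abstract device-operation interface: a concrete kernel instance (for example one obtained from the Composable Kernel instance factory) implements MakeArgumentPointer and MakeInvokerPointer. The sketch below is not part of this file; it shows one plausible way to drive such an instance for a packed NHWC tensor with per-channel scale, bias, and pre-computed mean/inverse-variance. The NHWC shape, the IsSupportedArgument check, and the invoker->Run(...) call are assumptions based on the wider CK device-operation API (device_base.hpp), not on this header.

#include <array>
#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/device_batchnorm_infer.hpp"

// Sketch: batch-norm inference on a packed NHWC tensor. DeviceOp stands for any
// concrete implementation of the DeviceBatchNormInfer interface declared above;
// all pointers are assumed to be valid device allocations of matching types.
template <typename DeviceOp, typename YElementwiseOp>
float run_batchnorm_infer_nhwc(DeviceOp& device_op,
                               const YElementwiseOp& y_op,
                               const void* p_x,
                               const void* p_scale,
                               const void* p_bias,
                               const void* p_estimated_mean,
                               const void* p_estimated_inv_variance,
                               void* p_y)
{
    constexpr ck::index_t N = 16, H = 8, W = 8, C = 64;

    // Rank = 4 (NHWC), NumBatchNormReduceDim = 3 (reduce N, H, W; keep C).
    const std::array<ck::index_t, 4> xy_lengths{N, H, W, C};
    const std::array<ck::index_t, 4> xy_strides{H * W * C, W * C, C, 1}; // packed layout
    const std::array<int, 3> reduce_dims{0, 1, 2};
    const std::array<ck::index_t, 1> param_lengths{C}; // per-channel scale/bias/mean/variance
    const std::array<ck::index_t, 1> param_strides{1};

    auto arg = device_op.MakeArgumentPointer(xy_lengths,
                                             xy_strides, // xStrides
                                             xy_strides, // yStrides
                                             reduce_dims,
                                             param_lengths,
                                             param_strides, // bnScaleStrides
                                             param_strides, // bnBiasStrides
                                             param_strides, // bnMeanVarStrides
                                             p_x,
                                             p_scale,
                                             p_bias,
                                             1e-5, // epsilon
                                             y_op,
                                             p_estimated_mean,
                                             p_estimated_inv_variance,
                                             p_y);

    // IsSupportedArgument() is assumed from the BaseOperator interface in
    // device_base.hpp; concrete instances use it to reject unsupported shapes.
    if(!device_op.IsSupportedArgument(arg.get()))
    {
        return -1.f;
    }

    // Run() with a default StreamConfig is assumed from the BaseInvoker interface;
    // it launches the kernel and returns the measured (or zero) elapsed time.
    auto invoker = device_op.MakeInvokerPointer();
    return invoker->Run(arg.get());
}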