RNN
Enumerations | |
enum | miopenRNNMode_t { miopenRNNRELU = 0 , miopenRNNTANH = 1 , miopenLSTM = 2 , miopenGRU = 3 } |
enum | miopenRNNInputMode_t { miopenRNNlinear = 0 , miopenRNNskip = 1 } |
enum | miopenRNNAlgo_t { miopenRNNdefault = 0 , miopenRNNfundamental } |
enum | miopenRNNDirectionMode_t { miopenRNNunidirection = 0 , miopenRNNbidirection = 1 } |
enum | miopenRNNBiasMode_t { miopenRNNNoBias = 0 , miopenRNNwithBias = 1 } |
enum | miopenRNNGEMMalgoMode_t { miopenRNNAlgoGEMM = 0 } |
enum | miopenRNNPaddingMode_t { miopenRNNIONotPadded = 0 , miopenRNNIOWithPadding = 1 } |
enum | miopenRNNFWDMode_t { miopenRNNTraining = 0 , miopenRNNInference = 1 } |
enum | miopenRNNBaseLayout_t { miopenRNNDataUnknownLayout = 0 , miopenRNNDataSeqMajorNotPadded = 1 , miopenRNNDataSeqMajorPadded = 2 , miopenRNNDataBatchMajorPadded = 3 } |
Functions | |
MIOPEN_DECLARE_OBJECT (miopenRNNDescriptor) | |
Creates the miopenRNNDescriptor_t type. More... | |
miopenStatus_t | miopenCreateRNNDescriptor (miopenRNNDescriptor_t *rnnDesc) |
Create an RNN layer descriptor. More... | |
miopenStatus_t | miopenGetRNNDescriptor (miopenRNNDescriptor_t rnnDesc, miopenRNNMode_t *rnnMode, miopenRNNAlgo_t *algoMode, miopenRNNInputMode_t *inputMode, miopenRNNDirectionMode_t *dirMode, miopenRNNBiasMode_t *biasMode, int *hiddenSize, int *layer) |
Retrieves an RNN layer descriptor's details. More... | |
miopenStatus_t | miopenGetRNNDescriptor_V2 (miopenRNNDescriptor_t rnnDesc, int *hiddenSize, int *layer, miopenDropoutDescriptor_t *dropoutDesc, miopenRNNInputMode_t *inputMode, miopenRNNDirectionMode_t *dirMode, miopenRNNMode_t *rnnMode, miopenRNNBiasMode_t *biasMode, miopenRNNAlgo_t *algoMode, miopenDataType_t *dataType) |
Retrieves an RNN layer descriptor's details (version 2). This version also retrieves the dropout descriptor associated with the RNN descriptor. More... | |
miopenStatus_t | miopenDestroyRNNDescriptor (miopenRNNDescriptor_t rnnDesc) |
Destroys the RNN descriptor object. More... | |
miopenStatus_t | miopenSetRNNDescriptor (miopenRNNDescriptor_t rnnDesc, const int hsize, const int nlayers, miopenRNNInputMode_t inMode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnnMode, miopenRNNBiasMode_t biasMode, miopenRNNAlgo_t algo, miopenDataType_t dataType) |
Set the details of the RNN descriptor. More... | |
miopenStatus_t | miopenSetRNNDescriptor_V2 (miopenRNNDescriptor_t rnnDesc, const int hsize, const int nlayers, miopenDropoutDescriptor_t dropoutDesc, miopenRNNInputMode_t inMode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnnMode, miopenRNNBiasMode_t biasMode, miopenRNNAlgo_t algo, miopenDataType_t dataType) |
Set the details of the RNN descriptor (version 2). This version enables the use of dropout in the RNN. More... | |
miopenStatus_t | miopenSetRNNDataSeqTensorDescriptor (miopenSeqTensorDescriptor_t seqTensorDesc, miopenDataType_t dataType, miopenRNNBaseLayout_t layout, int maxSequenceLen, int batchSize, int vectorSize, const int *sequenceLenArray, void *paddingMarker) |
Set shape of RNN seqData tensor. More... | |
miopenStatus_t | miopenGetRNNDataSeqTensorDescriptor (miopenSeqTensorDescriptor_t seqTensorDesc, miopenDataType_t *dataType, miopenRNNBaseLayout_t *layout, int *maxSequenceLen, int *batchSize, int *vectorSize, int sequenceLenArrayLimit, int *sequenceLenArray, void *paddingMarker) |
Get shape of RNN seqData tensor. More... | |
miopenStatus_t | miopenGetRNNWorkspaceSize (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, size_t *numBytes) |
Query the amount of memory required to execute the RNN layer. More... | |
miopenStatus_t | miopenGetRNNTrainingReserveSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, size_t *numBytes) |
Query the amount of memory required for RNN training. More... | |
miopenStatus_t | miopenGetRNNTempSpaceSizes (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenSeqTensorDescriptor_t xDesc, miopenRNNFWDMode_t fwdMode, size_t *workSpaceSize, size_t *reserveSpaceSize) |
Query the amount of additional memory required for this RNN layer execution. More... | |
miopenStatus_t | miopenGetRNNParamsSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenTensorDescriptor_t xDesc, size_t *numBytes, miopenDataType_t dtype) |
Query the amount of parameter memory required for RNN training. More... | |
miopenStatus_t | miopenGetRNNParamsDescriptor (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, miopenDataType_t dtype) |
Obtain a weight tensor descriptor for RNNs. More... | |
miopenStatus_t | miopenGetRNNInputTensorSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int seqLen, miopenTensorDescriptor_t *xDesc, size_t *numBytes) |
Obtain the size in bytes of the RNN input tensor. More... | |
miopenStatus_t | miopenGetRNNHiddenTensorSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int seqLen, miopenTensorDescriptor_t *xDesc, size_t *numBytes) |
Obtain the size in bytes of the RNN hidden tensor. More... | |
miopenStatus_t | miopenGetRNNLayerParamSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int paramID, size_t *numBytes) |
Gets the number of bytes of a parameter matrix. More... | |
miopenStatus_t | miopenGetRNNLayerBiasSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, const int biasID, size_t *numBytes) |
Gets the number of bytes of a bias. More... | |
miopenStatus_t | miopenGetRNNLayerParam (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, const void *w, const int paramID, miopenTensorDescriptor_t paramDesc, void *layerParam) |
Gets a weight matrix for a specific layer in an RNN stack. More... | |
miopenStatus_t | miopenGetRNNLayerBias (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, const void *w, const int biasID, miopenTensorDescriptor_t biasDesc, void *layerBias) |
Gets a bias for a specific layer in an RNN stack. More... | |
miopenStatus_t | miopenGetRNNLayerParamOffset (miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int paramID, miopenTensorDescriptor_t paramDesc, size_t *layerParamOffset) |
Gets an index offset for a specific weight matrix for a layer in the RNN stack. More... | |
miopenStatus_t | miopenGetRNNLayerBiasOffset (miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int biasID, miopenTensorDescriptor_t biasDesc, size_t *layerBiasOffset) |
Gets a bias index offset for a specific layer in an RNN stack. More... | |
miopenStatus_t | miopenSetRNNLayerParam (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, void *w, const int paramID, miopenTensorDescriptor_t paramDesc, const void *layerParam) |
Sets a weight matrix for a specific layer in an RNN stack. More... | |
miopenStatus_t | miopenSetRNNLayerBias (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, void *w, const int biasID, miopenTensorDescriptor_t biasDesc, const void *layerBias) |
Sets a bias for a specific layer in an RNN stack. More... | |
miopenStatus_t | miopenSetRNNPaddingMode (miopenRNNDescriptor_t rnnDesc, miopenRNNPaddingMode_t paddingMode) |
Sets the RNN padding mode in the RNN descriptor. More... | |
miopenStatus_t | miopenGetRNNPaddingMode (miopenRNNDescriptor_t rnnDesc, miopenRNNPaddingMode_t *paddingMode) |
This function retrieves the RNN padding mode from the RNN descriptor. More... | |
miopenStatus_t | miopenRNNForward (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, miopenRNNFWDMode_t fwdMode, const miopenSeqTensorDescriptor_t xDesc, const void *x, const miopenTensorDescriptor_t hDesc, const void *hx, void *hy, const miopenTensorDescriptor_t cDesc, const void *cx, void *cy, const miopenSeqTensorDescriptor_t yDesc, void *y, const void *w, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute forward training for recurrent layer. More... | |
miopenStatus_t | miopenRNNBackwardSeqData (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const miopenSeqTensorDescriptor_t yDesc, const void *y, const void *dy, const miopenTensorDescriptor_t hDesc, const void *hx, const void *dhy, void *dhx, const miopenTensorDescriptor_t cDesc, const void *cx, const void *dcy, void *dcx, const miopenSeqTensorDescriptor_t xDesc, void *dx, const void *w, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute backward data for recurrent layer. More... | |
miopenStatus_t | miopenRNNBackwardWeightsSeqTensor (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const miopenSeqTensorDescriptor_t xDesc, const void *x, const miopenTensorDescriptor_t hDesc, const void *hx, const miopenSeqTensorDescriptor_t yDesc, const void *y, void *dw, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, const void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute backward weights for recurrent layer. More... | |
miopenStatus_t | miopenRNNForwardTraining (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t *yDesc, void *y, const miopenTensorDescriptor_t hyDesc, void *hy, const miopenTensorDescriptor_t cyDesc, void *cy, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute forward training for recurrent layer. More... | |
miopenStatus_t | miopenRNNBackwardData (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *yDesc, const void *y, const miopenTensorDescriptor_t *dyDesc, const void *dy, const miopenTensorDescriptor_t dhyDesc, const void *dhy, const miopenTensorDescriptor_t dcyDesc, const void *dcy, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t *dxDesc, void *dx, const miopenTensorDescriptor_t dhxDesc, void *dhx, const miopenTensorDescriptor_t dcxDesc, void *dcx, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute backward data for recurrent layer. More... | |
miopenStatus_t | miopenRNNBackwardWeights (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t *yDesc, const void *y, const miopenTensorDescriptor_t dwDesc, void *dw, void *workSpace, size_t workSpaceNumBytes, const void *reserveSpace, size_t reserveSpaceNumBytes) |
Execute backward weights for recurrent layer. More... | |
miopenStatus_t | miopenRNNForwardInference (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t *yDesc, void *y, const miopenTensorDescriptor_t hyDesc, void *hy, const miopenTensorDescriptor_t cyDesc, void *cy, void *workSpace, size_t workSpaceNumBytes) |
Execute forward inference for RNN layer. More... | |
Detailed Description
Enumeration Type Documentation
◆ miopenRNNAlgo_t
enum miopenRNNAlgo_t |
◆ miopenRNNBaseLayout_t
◆ miopenRNNBiasMode_t
enum miopenRNNBiasMode_t |
◆ miopenRNNDirectionMode_t
◆ miopenRNNFWDMode_t
enum miopenRNNFWDMode_t |
◆ miopenRNNGEMMalgoMode_t
◆ miopenRNNInputMode_t
enum miopenRNNInputMode_t |
◆ miopenRNNMode_t
enum miopenRNNMode_t |
◆ miopenRNNPaddingMode_t
Function Documentation
◆ MIOPEN_DECLARE_OBJECT()
MIOPEN_DECLARE_OBJECT | ( | miopenRNNDescriptor | ) |
Creates the miopenRNNDescriptor_t type.
◆ miopenCreateRNNDescriptor()
miopenStatus_t miopenCreateRNNDescriptor | ( | miopenRNNDescriptor_t * | rnnDesc | ) |
Create an RNN layer descriptor.
API for creating an uninitialized RNN layer descriptor.
- Parameters
    - rnnDesc: Pointer to an RNN layer descriptor type (output)
- Returns
- miopenStatus_t
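A minimal usage sketch follows; error checking is elided, and the hidden size, layer count, and data type are illustrative values, not defaults.

```c
#include <miopen/miopen.h>

/* Create, configure, and destroy an RNN descriptor for a
 * 2-layer bidirectional LSTM with a 512-element hidden state. */
void configure_lstm_descriptor(void)
{
    miopenRNNDescriptor_t rnnDesc;
    miopenCreateRNNDescriptor(&rnnDesc);

    miopenSetRNNDescriptor(rnnDesc,
                           512,                  /* hsize     */
                           2,                    /* nlayers   */
                           miopenRNNlinear,      /* inMode    */
                           miopenRNNbidirection, /* direction */
                           miopenLSTM,           /* rnnMode   */
                           miopenRNNwithBias,    /* biasMode  */
                           miopenRNNdefault,     /* algo      */
                           miopenFloat);         /* dataType  */

    /* ... query buffer sizes, set weights, run forward/backward ... */

    miopenDestroyRNNDescriptor(rnnDesc);
}
```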
◆ miopenDestroyRNNDescriptor()
miopenStatus_t miopenDestroyRNNDescriptor | ( | miopenRNNDescriptor_t | rnnDesc | ) |
Destroys the RNN descriptor object.
- Parameters
    - rnnDesc: RNN layer descriptor type (input)
- Returns
- miopenStatus_t
◆ miopenGetRNNDataSeqTensorDescriptor()
miopenStatus_t miopenGetRNNDataSeqTensorDescriptor | ( | miopenSeqTensorDescriptor_t | seqTensorDesc, |
miopenDataType_t * | dataType, | ||
miopenRNNBaseLayout_t * | layout, | ||
int * | maxSequenceLen, | ||
int * | batchSize, | ||
int * | vectorSize, | ||
int | sequenceLenArrayLimit, | ||
int * | sequenceLenArray, | ||
void * | paddingMarker | ||
) |
Get shape of RNN seqData tensor.
Interface for retrieving the tensor shape used as RNN input data.
- Parameters
    - seqTensorDesc: Tensor descriptor (input)
    - dataType: MIOpen datatype (output)
    - layout: One of the main supported layouts for RNN data (output)
    - maxSequenceLen: Sequence length limit within this SeqTensor (output)
    - batchSize: Number of sequences within this SeqTensor (output)
    - vectorSize: Vector size (output)
    - sequenceLenArrayLimit: Limit for the number of elements that can be returned to the user in sequenceLenArray (input)
    - sequenceLenArray: Array containing the length of each sequence in the SeqTensor. This is allowed to be a NULL pointer if sequenceLenArrayLimit is 0 (output)
    - paddingMarker: Not used, should be NULL (input)
- Returns
- miopenStatus_t
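A minimal sketch pairing the Set/Get calls; it assumes the descriptor was created beforehand (for example with miopenCreateSeqTensorDescriptor) and elides error checking.

```c
#include <stddef.h>
#include <miopen/miopen.h>

/* Describe a batch-major, padded batch of 3 sequences (lengths 10, 8, 5)
 * with 64-element input vectors, then read the shape back. */
void describe_seq_batch(miopenSeqTensorDescriptor_t seqDesc)
{
    int seqLens[3] = {10, 8, 5};

    miopenSetRNNDataSeqTensorDescriptor(seqDesc,
                                        miopenFloat,
                                        miopenRNNDataBatchMajorPadded,
                                        10,      /* maxSequenceLen */
                                        3,       /* batchSize      */
                                        64,      /* vectorSize     */
                                        seqLens,
                                        NULL);   /* paddingMarker  */

    miopenDataType_t dtype;
    miopenRNNBaseLayout_t layout;
    int maxLen, batch, vec, lens[3];
    miopenGetRNNDataSeqTensorDescriptor(seqDesc, &dtype, &layout,
                                        &maxLen, &batch, &vec,
                                        3 /* sequenceLenArrayLimit */,
                                        lens, NULL);
}
```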
◆ miopenGetRNNDescriptor()
miopenStatus_t miopenGetRNNDescriptor | ( | miopenRNNDescriptor_t | rnnDesc, |
miopenRNNMode_t * | rnnMode, | ||
miopenRNNAlgo_t * | algoMode, | ||
miopenRNNInputMode_t * | inputMode, | ||
miopenRNNDirectionMode_t * | dirMode, | ||
miopenRNNBiasMode_t * | biasMode, | ||
int * | hiddenSize, | ||
int * | layer | ||
) |
Retrieves an RNN layer descriptor's details.
- Parameters
    - rnnDesc: RNN layer descriptor (input)
    - rnnMode: RNN mode (output)
    - algoMode: RNN algorithm mode (output)
    - inputMode: RNN data input mode (output)
    - dirMode: Uni or bi direction mode (output)
    - biasMode: Bias used (output)
    - hiddenSize: Size of hidden state (output)
    - layer: Number of stacked layers (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNDescriptor_V2()
miopenStatus_t miopenGetRNNDescriptor_V2 | ( | miopenRNNDescriptor_t | rnnDesc, |
int * | hiddenSize, | ||
int * | layer, | ||
miopenDropoutDescriptor_t * | dropoutDesc, | ||
miopenRNNInputMode_t * | inputMode, | ||
miopenRNNDirectionMode_t * | dirMode, | ||
miopenRNNMode_t * | rnnMode, | ||
miopenRNNBiasMode_t * | biasMode, | ||
miopenRNNAlgo_t * | algoMode, | ||
miopenDataType_t * | dataType | ||
) |
Retrieves an RNN layer descriptor's details (version 2). This version also retrieves the dropout descriptor associated with the RNN descriptor.
- Parameters
    - rnnDesc: RNN layer descriptor (input)
    - hiddenSize: Size of hidden state (output)
    - layer: Number of stacked layers (output)
    - dropoutDesc: Pre-configured dropout descriptor for the dropout layer in between RNN layers (output)
    - inputMode: RNN data input mode (output)
    - dirMode: Uni or bi direction mode (output)
    - rnnMode: RNN mode (output)
    - biasMode: Bias used (output)
    - algoMode: RNN algorithm mode (output)
    - dataType: Data type of RNN (output)
- Returns
- miopenStatus_t
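A short sketch reading the configuration back from an existing descriptor (error checking elided):

```c
#include <miopen/miopen.h>

/* Query the configuration stored in an RNN descriptor. */
void inspect_rnn(miopenRNNDescriptor_t rnnDesc)
{
    int hiddenSize = 0, layers = 0;
    miopenDropoutDescriptor_t dropoutDesc;
    miopenRNNInputMode_t inputMode;
    miopenRNNDirectionMode_t dirMode;
    miopenRNNMode_t rnnMode;
    miopenRNNBiasMode_t biasMode;
    miopenRNNAlgo_t algoMode;
    miopenDataType_t dataType;

    miopenGetRNNDescriptor_V2(rnnDesc, &hiddenSize, &layers, &dropoutDesc,
                              &inputMode, &dirMode, &rnnMode, &biasMode,
                              &algoMode, &dataType);
    /* hiddenSize, layers, etc. now reflect the descriptor's settings. */
}
```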
◆ miopenGetRNNHiddenTensorSize()
miopenStatus_t miopenGetRNNHiddenTensorSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | seqLen, | ||
miopenTensorDescriptor_t * | xDesc, | ||
size_t * | numBytes | ||
) |
Obtain the size in bytes of the RNN hidden tensor.
This function determines the size in bytes of the allocation needed for the hidden tensor over all layers.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: Fully populated RNN layer descriptor type (input)
    - seqLen: Number of iteration unrolls (input)
    - xDesc: An array of previously populated tensor descriptors (input)
    - numBytes: Number of bytes required for the hidden tensor (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNInputTensorSize()
miopenStatus_t miopenGetRNNInputTensorSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | seqLen, | ||
miopenTensorDescriptor_t * | xDesc, | ||
size_t * | numBytes | ||
) |
Obtain the size in bytes of the RNN input tensor.
This function determines the size in bytes of the allocation needed for the input data tensor for an RNN layer. The number of bytes is derived from the array of tensor descriptors.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: Fully populated RNN layer descriptor (input)
    - seqLen: Number of iteration unrolls (input)
    - xDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - numBytes: Number of bytes required for the input tensor (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNLayerBias()
miopenStatus_t miopenGetRNNLayerBias | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | layer, | ||
miopenTensorDescriptor_t | xDesc, | ||
miopenTensorDescriptor_t | wDesc, | ||
const void * | w, | ||
const int | biasID, | ||
miopenTensorDescriptor_t | biasDesc, | ||
void * | layerBias | ||
) |
Gets a bias for a specific layer in an RNN stack.
This function retrieves the bias data for a specific layer and bias ID and copies the data into previously allocated device memory.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, biasID == 0 retrieves the bias associated with the input GEMM, while biasID == 1 retrieves the bias associated with the hidden state GEMM.
For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, while biasID 4 to 7 refer to the biases associated with the hidden state GEMM.
- biasID 0 and 4 are for the input gate.
- biasID 1 and 5 are for the forget gate.
- biasID 2 and 6 are for the output gate.
- biasID 3 and 7 are for the new memory gate.
For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- biasID 0 and 3 are for the update gate.
- biasID 1 and 4 are for the reset gate.
- biasID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The output argument biasDesc is a previously created tensor descriptor that is populated to describe the memory layout of the bias. It is fully packed and is used when calling miopenSetRNNLayerBias().
The argument layerBias should either be nullptr, or have device memory allocated to allow copying of the entire layer bias into it. If layerBias is nullptr then only the biasDesc is populated and returned. The size in bytes of the layer bias can be determined by using miopenGetRNNLayerBiasSize().
Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerBias() will return an error status miopenStatusBadParm for input biasID associated with the input GEMM.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - xDesc: A tensor descriptor to input (input)
    - wDesc: A tensor descriptor to the parameter tensor (input)
    - w: Pointer to memory containing parameter tensor (input)
    - biasID: ID of the internal parameter tensor (input)
    - biasDesc: Descriptor of the parameter tensor (output)
    - layerBias: Pointer to the memory location of the bias tensor (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNLayerBiasOffset()
miopenStatus_t miopenGetRNNLayerBiasOffset | ( | miopenRNNDescriptor_t | rnnDesc, |
const int | layer, | ||
miopenTensorDescriptor_t | xDesc, | ||
const int | biasID, | ||
miopenTensorDescriptor_t | biasDesc, | ||
size_t * | layerBiasOffset | ||
) |
Gets a bias index offset for a specific layer in an RNN stack.
This function retrieves the bias index offset for a specific layer and bias ID.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, biasID == 0 retrieves the bias offset associated with the input GEMM, while biasID == 1 retrieves the bias offset associated with the hidden state GEMM.
For miopenLSTM, biasID 0 to 3 refer to the bias offsets associated with the input GEMM, while biasID 4 to 7 refer to the bias offsets associated with the hidden state GEMM.
- biasID 0 and 4 are for the input gate.
- biasID 1 and 5 are for the forget gate.
- biasID 2 and 6 are for the output gate.
- biasID 3 and 7 are for the new memory gate.
For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- biasID 0 and 3 are for the update gate.
- biasID 1 and 4 are for the reset gate.
- biasID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The output argument biasDesc is a previously created tensor descriptor that is populated to describe the memory layout of the bias. It is fully packed and is used when calling miopenSetRNNLayerBias().
The argument layerBiasOffset should either be nullptr, or point to an output address. If layerBiasOffset is nullptr then only the biasDesc is populated and returned.
Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerBiasOffset() will return an error status miopenStatusBadParm for input biasID associated with the input GEMM.
- Parameters
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - xDesc: A tensor descriptor to input (input)
    - biasID: ID of the internal parameter tensor (input)
    - biasDesc: Descriptor of the parameter tensor (output)
    - layerBiasOffset: Location for the bias offset (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNLayerBiasSize()
miopenStatus_t miopenGetRNNLayerBiasSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | layer, | ||
const int | biasID, | ||
size_t * | numBytes | ||
) |
Gets the number of bytes of a bias.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, biasID == 0 retrieves the bias associated with the input GEMM, while biasID == 1 retrieves the bias associated with the hidden state GEMM.
For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, while biasID 4 to 7 refer to the biases associated with the hidden state GEMM.
- biasID 0 and 4 are for the input gate.
- biasID 1 and 5 are for the forget gate.
- biasID 2 and 6 are for the output gate.
- biasID 3 and 7 are for the new memory gate.
For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- biasID 0 and 3 are for the update gate.
- biasID 1 and 4 are for the reset gate.
- biasID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - biasID: ID of the internal parameter tensor (input)
    - numBytes: The number of bytes of the layer's bias (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNLayerParam()
miopenStatus_t miopenGetRNNLayerParam | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | layer, | ||
miopenTensorDescriptor_t | xDesc, | ||
miopenTensorDescriptor_t | wDesc, | ||
const void * | w, | ||
const int | paramID, | ||
miopenTensorDescriptor_t | paramDesc, | ||
void * | layerParam | ||
) |
Gets a weight matrix for a specific layer in an RNN stack.
This function retrieves the weight matrix data for a specific layer and parameter ID and copies the data into previously allocated device memory.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, paramID == 0 retrieves the weight matrix associated with the input GEMM, while paramID == 1 retrieves the weight matrix associated with the hidden state GEMM.
For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, while paramID 4 to 7 refer to the weight matrices associated with the hidden state GEMM.
- paramID 0 and 4 are for the input gate.
- paramID 1 and 5 are for the forget gate.
- paramID 2 and 6 are for the output gate.
- paramID 3 and 7 are for the new memory gate.
For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- paramID 0 and 3 are for the update gate.
- paramID 1 and 4 are for the reset gate.
- paramID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The output argument paramDesc is a previously created tensor descriptor that is populated to describe the memory layout of the parameter matrix. It is fully packed and is used when calling miopenSetRNNLayerParam().
The argument layerParam should either be nullptr, or have device memory allocated to allow copying of the entire layer parameter matrix into it. If layerParam is nullptr then only the paramDesc is populated and returned. The size in bytes of the layer parameter matrix can be determined by using miopenGetRNNLayerParamSize().
Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerParam() will return an error status miopenStatusBadParm for input paramID associated with the input GEMM.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - xDesc: A tensor descriptor to input (input)
    - wDesc: A tensor descriptor to the parameter tensor (input)
    - w: Pointer to memory containing parameter tensor (input)
    - paramID: ID of the internal parameter tensor (input)
    - paramDesc: Tensor descriptor for the fully packed output parameter tensor (output)
    - layerParam: Pointer to the memory location of the parameter tensor (output)
- Returns
- miopenStatus_t
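A sketch of the typical two-call pattern: size the matrix first, then allocate device memory and copy it out. It assumes handle, rnnDesc, xDesc, wDesc, and the packed device weight buffer w already exist; hipMalloc comes from the HIP runtime, and error checking is elided.

```c
#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

/* Copy one weight matrix (layer 0, paramID 0: the input-gate input GEMM for
 * an LSTM) out of the packed weight buffer w into its own device buffer. */
void fetch_layer_weight_matrix(miopenHandle_t handle,
                               miopenRNNDescriptor_t rnnDesc,
                               miopenTensorDescriptor_t xDesc,
                               miopenTensorDescriptor_t wDesc,
                               const void* w)
{
    const int layer = 0, paramID = 0;

    miopenTensorDescriptor_t paramDesc;
    miopenCreateTensorDescriptor(&paramDesc);

    /* Query how many bytes the matrix occupies ... */
    size_t paramBytes = 0;
    miopenGetRNNLayerParamSize(handle, rnnDesc, layer, xDesc, paramID, &paramBytes);

    /* ... then allocate device memory and copy the matrix into it.
     * Passing NULL for layerParam would only populate paramDesc. */
    void* layerParam = NULL;
    hipMalloc(&layerParam, paramBytes);
    miopenGetRNNLayerParam(handle, rnnDesc, layer, xDesc, wDesc, w,
                           paramID, paramDesc, layerParam);

    /* ... inspect layerParam ... */
    hipFree(layerParam);
    miopenDestroyTensorDescriptor(paramDesc);
}
```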
◆ miopenGetRNNLayerParamOffset()
miopenStatus_t miopenGetRNNLayerParamOffset | ( | miopenRNNDescriptor_t | rnnDesc, |
const int | layer, | ||
miopenTensorDescriptor_t | xDesc, | ||
const int | paramID, | ||
miopenTensorDescriptor_t | paramDesc, | ||
size_t * | layerParamOffset | ||
) |
Gets an index offset for a specific weight matrix for a layer in the RNN stack.
This function retrieves the index offset for a weight matrix in a layer.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, paramID == 0 retrieves the weight matrix offset associated with the input GEMM, while paramID == 1 retrieves the weight matrix offset associated with the hidden state GEMM.
For miopenLSTM, paramID 0 to 3 refer to the weight matrix offsets associated with the input GEMM, while paramID 4 to 7 refer to the weight matrix offsets associated with the hidden state GEMM.
- paramID 0 and 4 are for the input gate.
- paramID 1 and 5 are for the forget gate.
- paramID 2 and 6 are for the output gate.
- paramID 3 and 7 are for the new memory gate.
For miopenGRU paramID 0 to 2 refer to the weight matrix offset associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- paramID 0 and 3 are for the update gate.
- paramID 1 and 4 are for the reset gate.
- paramID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The output argument paramDesc is a previously created tensor descriptor that is populated to describe the memory layout of the parameter matrix. It is fully packed and is used when calling miopenSetRNNLayerParam().
The argument layerParamOffset should either be nullptr, or an address to place the offset. If layerParamOffset is nullptr then only the paramDesc is populated and returned.
Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerParamOffset() will return an error status miopenStatusBadParm for input paramID associated with the input GEMM.
- Parameters
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - xDesc: A tensor descriptor to input (input)
    - paramID: ID of the internal parameter tensor (input)
    - paramDesc: Tensor descriptor for the fully packed output parameter tensor (output)
    - layerParamOffset: Location for the parameter offset (output)
- Returns
- miopenStatus_t
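A sketch that uses the offset to overwrite one gate's weights in place inside the packed buffer. It assumes a float-typed RNN, treats the returned value as an element index into w (following the "index offset" wording above, which is an assumption here), and uses a hypothetical host array newWeights; error checking is elided.

```c
#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

/* Overwrite the layer-0, paramID-0 weight matrix (the update-gate input GEMM
 * for a GRU) directly inside the packed device weight buffer w. */
void overwrite_gate_weights(miopenHandle_t handle,
                            miopenRNNDescriptor_t rnnDesc,
                            miopenTensorDescriptor_t xDesc,
                            float* w,                 /* packed device weights */
                            const float* newWeights)  /* host replacement data */
{
    const int layer = 0, paramID = 0;

    miopenTensorDescriptor_t paramDesc;
    miopenCreateTensorDescriptor(&paramDesc);

    size_t offset = 0, paramBytes = 0;
    miopenGetRNNLayerParamOffset(rnnDesc, layer, xDesc, paramID, paramDesc, &offset);
    miopenGetRNNLayerParamSize(handle, rnnDesc, layer, xDesc, paramID, &paramBytes);

    /* Assumption: offset is an element index into the weight buffer. */
    hipMemcpy(w + offset, newWeights, paramBytes, hipMemcpyHostToDevice);

    miopenDestroyTensorDescriptor(paramDesc);
}
```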
◆ miopenGetRNNLayerParamSize()
miopenStatus_t miopenGetRNNLayerParamSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | layer, | ||
miopenTensorDescriptor_t | xDesc, | ||
const int | paramID, | ||
size_t * | numBytes | ||
) |
Gets the number of bytes of a parameter matrix.
For RNN vanilla miopenRNNRELU and miopenRNNTANH, paramID == 0 retrieves the weight matrix associated with the input GEMM, while paramID == 1 retrieves the weight matrix associated with the hidden state GEMM.
For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, while paramID 4 to 7 refer to the weight matrices associated with the hidden state GEMM.
- paramID 0 and 4 are for the input gate.
- paramID 1 and 5 are for the forget gate.
- paramID 2 and 6 are for the output gate.
- paramID 3 and 7 are for the new memory gate.
For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- paramID 0 and 3 are for the update gate.
- paramID 1 and 4 are for the reset gate.
- paramID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - layer: The layer number in the RNN stack (input)
    - xDesc: A tensor descriptor to input (input)
    - paramID: ID of the internal parameter tensor (input)
    - numBytes: The number of bytes of the layer's parameter matrix (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNPaddingMode()
miopenStatus_t miopenGetRNNPaddingMode | ( | miopenRNNDescriptor_t | rnnDesc, |
miopenRNNPaddingMode_t * | paddingMode | ||
) |
This function retrieves the RNN padding mode from the RNN descriptor.
- Parameters
    - rnnDesc: RNN layer descriptor type (input)
    - paddingMode: Pointer to the RNN padding mode (output)
- Returns
- miopenStatus_t
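A short sketch pairing the Set/Get padding-mode calls on an existing descriptor (error checking elided):

```c
#include <miopen/miopen.h>

/* Enable padded sequence IO on an RNN descriptor and read the setting back. */
void enable_io_padding(miopenRNNDescriptor_t rnnDesc)
{
    miopenSetRNNPaddingMode(rnnDesc, miopenRNNIOWithPadding);

    miopenRNNPaddingMode_t mode;
    miopenGetRNNPaddingMode(rnnDesc, &mode);  /* mode == miopenRNNIOWithPadding */
}
```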
◆ miopenGetRNNParamsDescriptor()
miopenStatus_t miopenGetRNNParamsDescriptor | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
miopenTensorDescriptor_t | xDesc, | ||
miopenTensorDescriptor_t | wDesc, | ||
miopenDataType_t | dtype | ||
) |
Obtain a weight tensor descriptor for RNNs.
This function populates a weight descriptor that describes the memory layout of the weight matrix.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: Fully populated RNN layer descriptor type (input)
    - xDesc: A previously populated tensor descriptor (input)
    - wDesc: A previously allocated tensor descriptor (output)
    - dtype: MIOpen data type enum (input)
- Returns
- miopenStatus_t
◆ miopenGetRNNParamsSize()
miopenStatus_t miopenGetRNNParamsSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
miopenTensorDescriptor_t | xDesc, | ||
size_t * | numBytes, | ||
miopenDataType_t | dtype | ||
) |
Query the amount of parameter memory required for RNN training.
This function calculates the amount of parameter memory required to train the RNN layer given an RNN descriptor and a tensor descriptor.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - xDesc: A tensor descriptor (input)
    - numBytes: Number of bytes required for the RNN layer's parameter (weight) buffer (output)
    - dtype: MIOpen data type enum (input)
- Returns
- miopenStatus_t
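A sketch that sizes and allocates the packed weight buffer with hipMalloc; handle, rnnDesc, and xDesc are assumed to exist, and error checking is elided.

```c
#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

/* Allocate the packed weight buffer for an RNN layer. The caller initializes
 * the weights (e.g. via miopenSetRNNLayerParam) and frees it with hipFree. */
void* allocate_rnn_weights(miopenHandle_t handle,
                           miopenRNNDescriptor_t rnnDesc,
                           miopenTensorDescriptor_t xDesc)
{
    size_t weightBytes = 0;
    miopenGetRNNParamsSize(handle, rnnDesc, xDesc, &weightBytes, miopenFloat);

    void* w = NULL;
    hipMalloc(&w, weightBytes);
    return w;
}
```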
◆ miopenGetRNNTempSpaceSizes()
miopenStatus_t miopenGetRNNTempSpaceSizes | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
miopenSeqTensorDescriptor_t | xDesc, | ||
miopenRNNFWDMode_t | fwdMode, | ||
size_t * | workSpaceSize, | ||
size_t * | reserveSpaceSize | ||
) |
Query the amount of additional memory required for this RNN layer execution.
This function calculates the size of the extra buffers, which depends on the layer configuration as determined by the RNN descriptor, the forward mode (fwdMode), and the data descriptor. If fwdMode is miopenRNNInference, reserveSpaceSize is always zero, because the reserve-space buffer is not used in inference.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - xDesc: Sequence data tensor descriptor (input)
    - fwdMode: Specifies in which mode the buffers will be used (input)
    - workSpaceSize: Minimum WorkSpace buffer size required for RNN layer execution (output)
    - reserveSpaceSize: Minimum ReserveSpace buffer size required for RNN layer execution (output)
- Returns
- miopenStatus_t
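A sketch that sizes and allocates both scratch buffers for training; with miopenRNNInference the reserve size comes back as zero and no reserve buffer is needed. handle, rnnDesc, and the sequence descriptor are assumed to exist; error checking is elided.

```c
#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

/* Allocate the workspace and reserve-space buffers needed for RNN training. */
void allocate_rnn_scratch(miopenHandle_t handle,
                          miopenRNNDescriptor_t rnnDesc,
                          miopenSeqTensorDescriptor_t xDesc,
                          void** workSpace, size_t* workSpaceSize,
                          void** reserveSpace, size_t* reserveSpaceSize)
{
    miopenGetRNNTempSpaceSizes(handle, rnnDesc, xDesc, miopenRNNTraining,
                               workSpaceSize, reserveSpaceSize);

    hipMalloc(workSpace, *workSpaceSize);
    hipMalloc(reserveSpace, *reserveSpaceSize);
}
```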
◆ miopenGetRNNTrainingReserveSize()
miopenStatus_t miopenGetRNNTrainingReserveSize | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | sequenceLen, | ||
const miopenTensorDescriptor_t * | xDesc, | ||
size_t * | numBytes | ||
) |
Query the amount of memory required for RNN training.
This function calculates the amount of memory required to train the RNN layer given an RNN descriptor and a tensor descriptor.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - sequenceLen: Number of iteration unrolls (input)
    - xDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - numBytes: Number of bytes required for RNN layer execution (output)
- Returns
- miopenStatus_t
◆ miopenGetRNNWorkspaceSize()
miopenStatus_t miopenGetRNNWorkspaceSize | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
const int | sequenceLen, | ||
const miopenTensorDescriptor_t * | xDesc, | ||
size_t * | numBytes | ||
) |
Query the amount of memory required to execute the RNN layer.
This function calculates the amount of memory required to run the RNN layer given an RNN descriptor and a tensor descriptor.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - sequenceLen: Number of iteration unrolls (input)
    - xDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - numBytes: Number of bytes required for RNN layer execution (output)
- Returns
- miopenStatus_t
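For this older per-timestep API, xDesc is an array with one descriptor per unrolled step. A sketch building such an array for a fixed batch (2-D descriptors of batch size by input vector length, as described above; shapes are illustrative and error checking is elided) and querying both scratch sizes:

```c
#include <miopen/miopen.h>

#define SEQ_LEN 16
#define BATCH   32
#define IN_VEC  64

/* Query workspace and reserve-space sizes with a per-timestep descriptor array. */
void query_legacy_sizes(miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc,
                        size_t* workBytes, size_t* reserveBytes)
{
    miopenTensorDescriptor_t xDescs[SEQ_LEN];
    int dims[2]    = {BATCH, IN_VEC};
    int strides[2] = {IN_VEC, 1};

    for (int t = 0; t < SEQ_LEN; ++t) {
        miopenCreateTensorDescriptor(&xDescs[t]);
        miopenSetTensorDescriptor(xDescs[t], miopenFloat, 2, dims, strides);
    }

    miopenGetRNNWorkspaceSize(handle, rnnDesc, SEQ_LEN, xDescs, workBytes);
    miopenGetRNNTrainingReserveSize(handle, rnnDesc, SEQ_LEN, xDescs, reserveBytes);

    for (int t = 0; t < SEQ_LEN; ++t)
        miopenDestroyTensorDescriptor(xDescs[t]);
}
```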
◆ miopenRNNBackwardData()
miopenStatus_t miopenRNNBackwardData | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
const int | sequenceLen, | ||
const miopenTensorDescriptor_t * | yDesc, | ||
const void * | y, | ||
const miopenTensorDescriptor_t * | dyDesc, | ||
const void * | dy, | ||
const miopenTensorDescriptor_t | dhyDesc, | ||
const void * | dhy, | ||
const miopenTensorDescriptor_t | dcyDesc, | ||
const void * | dcy, | ||
const miopenTensorDescriptor_t | wDesc, | ||
const void * | w, | ||
const miopenTensorDescriptor_t | hxDesc, | ||
const void * | hx, | ||
const miopenTensorDescriptor_t | cxDesc, | ||
const void * | cx, | ||
const miopenTensorDescriptor_t * | dxDesc, | ||
void * | dx, | ||
const miopenTensorDescriptor_t | dhxDesc, | ||
void * | dhx, | ||
const miopenTensorDescriptor_t | dcxDesc, | ||
void * | dcx, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes, | ||
void * | reserveSpace, | ||
size_t | reserveSpaceNumBytes | ||
) |
Execute backward data for recurrent layer.
Interface for executing the backward data pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - sequenceLen: Temporal iterations to unroll (input)
    - yDesc: An array of tensor descriptors (input)
    - y: Pointer to the output tensor of the forward pass (input)
    - dyDesc: An array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of each element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If the direction mode is bidirectional, the second dimension is twice the hiddenSize. (input)
    - dy: Pointer to the output delta tensor (input)
    - dhyDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - dhy: Pointer to the hidden layer delta input tensor (input)
    - dcyDesc: A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - dcy: Pointer to the cell layer delta input tensor. If dcy is NULL, then the initial delta cell state will be zero initialized. (input)
    - wDesc: A weights tensor descriptor (input)
    - w: Pointer to input weights tensor (input)
    - hxDesc: An input hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - cxDesc: An input cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - cx: Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
    - dxDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - dx: Pointer to the input delta tensor (output)
    - dhxDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - dhx: Pointer to the delta hidden layer output tensor. If dhx is NULL the hidden gradient will not be output. (output)
    - dcxDesc: A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - dcx: Pointer to the delta cell layer output tensor. If dcx is NULL the cell gradient will not be output. (output)
    - workSpace: Pointer to memory allocated for forward training (input)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
    - reserveSpace: Pointer to memory allocated for hidden states used during training (input / output)
    - reserveSpaceNumBytes: Number of allocated bytes in memory for use in the forward (input)
- Returns
- miopenStatus_t
◆ miopenRNNBackwardSeqData()
miopenStatus_t miopenRNNBackwardSeqData | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
const miopenSeqTensorDescriptor_t | yDesc, | ||
const void * | y, | ||
const void * | dy, | ||
const miopenTensorDescriptor_t | hDesc, | ||
const void * | hx, | ||
const void * | dhy, | ||
void * | dhx, | ||
const miopenTensorDescriptor_t | cDesc, | ||
const void * | cx, | ||
const void * | dcy, | ||
void * | dcx, | ||
const miopenSeqTensorDescriptor_t | xDesc, | ||
void * | dx, | ||
const void * | w, | ||
size_t | weightSpaceSize, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes, | ||
void * | reserveSpace, | ||
size_t | reserveSpaceNumBytes | ||
) |
Execute backward data for recurrent layer.
Interface for executing the backward data pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - yDesc: An output tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - y: Pointer to the output tensor of the forward pass (input)
    - dy: Pointer to the output delta tensor (input)
    - hDesc: An input hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - dhy: Pointer to the hidden layer delta input tensor (input)
    - dhx: Pointer to the delta hidden layer output tensor. If dhx is NULL the hidden gradient will not be output. (output)
    - cDesc: An input cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - cx: Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
    - dcy: Pointer to the cell layer delta input tensor. If dcy is NULL, then the initial delta cell state will be zero initialized. (input)
    - dcx: Pointer to the delta cell layer output tensor. If dcx is NULL the cell gradient will not be output. (output)
    - xDesc: An input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - dx: Pointer to the input delta tensor (output)
    - w: Pointer to input weights tensor (input)
    - weightSpaceSize: Number of allocated bytes in memory for the weights tensor (input)
    - workSpace: Pointer to memory allocated for forward training (input)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
    - reserveSpace: Pointer to memory allocated for hidden states used during training (input / output)
    - reserveSpaceNumBytes: Number of allocated bytes in memory for use in the forward (input)
- Returns
- miopenStatus_t
◆ miopenRNNBackwardWeights()
miopenStatus_t miopenRNNBackwardWeights | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
const int | sequenceLen, | ||
const miopenTensorDescriptor_t * | xDesc, | ||
const void * | x, | ||
const miopenTensorDescriptor_t | hxDesc, | ||
const void * | hx, | ||
const miopenTensorDescriptor_t * | yDesc, | ||
const void * | y, | ||
const miopenTensorDescriptor_t | dwDesc, | ||
void * | dw, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes, | ||
const void * | reserveSpace, | ||
size_t | reserveSpaceNumBytes | ||
) |
Execute backward weights for recurrent layer.
Interface for executing the backward weights pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - sequenceLen: Temporal iterations to unroll (input)
    - xDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - x: Pointer to input tensor (input)
    - hxDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - yDesc: An array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of each element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If the direction mode is bidirectional, the second dimension is twice the hiddenSize. (input)
    - y: Pointer to the output tensor (input)
    - dwDesc: A weights tensor descriptor (input)
    - dw: Pointer to the weight gradients tensor (input / output)
    - workSpace: Pointer to memory allocated for forward training (input)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
    - reserveSpace: Pointer to memory allocated for hidden states used during training (input)
    - reserveSpaceNumBytes: Number of allocated bytes in memory for use in the forward (input)
- Returns
- miopenStatus_t
◆ miopenRNNBackwardWeightsSeqTensor()
miopenStatus_t miopenRNNBackwardWeightsSeqTensor | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
const miopenSeqTensorDescriptor_t | xDesc, | ||
const void * | x, | ||
const miopenTensorDescriptor_t | hDesc, | ||
const void * | hx, | ||
const miopenSeqTensorDescriptor_t | yDesc, | ||
const void * | y, | ||
void * | dw, | ||
size_t | weightSpaceSize, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes, | ||
const void * | reserveSpace, | ||
size_t | reserveSpaceNumBytes | ||
) |
Execute backward weights for recurrent layer.
Interface for executing the backward weights pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - xDesc: An input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - x: Pointer to input tensor (input)
    - hDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - yDesc: An output tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - y: Pointer to the output tensor (input)
    - dw: Pointer to the weight gradients tensor (input / output)
    - weightSpaceSize: Number of allocated bytes in memory for the weights tensor (input)
    - workSpace: Pointer to memory allocated for forward training (input)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
    - reserveSpace: Pointer to memory allocated for hidden states used during training (input)
    - reserveSpaceNumBytes: Number of allocated bytes in memory for use in the forward (input)
- Returns
- miopenStatus_t
◆ miopenRNNForward()
miopenStatus_t miopenRNNForward | ( | miopenHandle_t | handle, |
const miopenRNNDescriptor_t | rnnDesc, | ||
miopenRNNFWDMode_t | fwdMode, | ||
const miopenSeqTensorDescriptor_t | xDesc, | ||
const void * | x, | ||
const miopenTensorDescriptor_t | hDesc, | ||
const void * | hx, | ||
void * | hy, | ||
const miopenTensorDescriptor_t | cDesc, | ||
const void * | cx, | ||
void * | cy, | ||
const miopenSeqTensorDescriptor_t | yDesc, | ||
void * | y, | ||
const void * | w, | ||
size_t | weightSpaceSize, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes, | ||
void * | reserveSpace, | ||
size_t | reserveSpaceNumBytes | ||
) |
Execute forward training for recurrent layer.
Interface for executing the forward training / inference pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - fwdMode: Specifies in which mode the buffers will be used (input)
    - xDesc: An input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - x: Pointer to input tensor (input)
    - hDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - hy: Pointer to the hidden layer output tensor. If hy is NULL, then the final hidden state will not be saved. (output)
    - cDesc: A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - cx: Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
    - cy: Pointer to the cell layer output tensor. If cy is NULL, then the final cell state will not be saved. (output)
    - yDesc: An output tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
    - y: Pointer to output tensor (output)
    - w: Pointer to input weights tensor (input)
    - weightSpaceSize: Number of allocated bytes in memory for the weights tensor (input)
    - workSpace: Pointer to memory allocated for the forward pass (input / output)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
    - reserveSpace: Pointer to memory allocated for hidden states used during training (input / output)
    - reserveSpaceNumBytes: Number of allocated bytes in memory for use in the forward (input)
- Returns
- miopenStatus_t
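A condensed sketch of an inference-mode call with the sequence-data API; all descriptors and device buffers are assumed to have been created and sized as described above, hx and cx are passed as NULL so the initial states are zero-initialized, and the reserve space is omitted because its required size is zero in inference mode. Error checking is elided.

```c
#include <stddef.h>
#include <miopen/miopen.h>

/* Forward inference over a sequence batch with miopenRNNForward. */
void rnn_infer(miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc,
               miopenSeqTensorDescriptor_t xDesc, const void* x,
               miopenTensorDescriptor_t hDesc, void* hy,
               miopenTensorDescriptor_t cDesc, void* cy,
               miopenSeqTensorDescriptor_t yDesc, void* y,
               const void* w, size_t weightSpaceSize,
               void* workSpace, size_t workSpaceSize)
{
    miopenRNNForward(handle, rnnDesc, miopenRNNInference,
                     xDesc, x,
                     hDesc, NULL /* hx */, hy,
                     cDesc, NULL /* cx */, cy,
                     yDesc, y,
                     w, weightSpaceSize,
                     workSpace, workSpaceSize,
                     NULL /* reserveSpace */, 0);
}
```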
◆ miopenRNNForwardInference()
miopenStatus_t miopenRNNForwardInference | ( | miopenHandle_t | handle, |
miopenRNNDescriptor_t | rnnDesc, | ||
const int | sequenceLen, | ||
const miopenTensorDescriptor_t * | xDesc, | ||
const void * | x, | ||
const miopenTensorDescriptor_t | hxDesc, | ||
const void * | hx, | ||
const miopenTensorDescriptor_t | cxDesc, | ||
const void * | cx, | ||
const miopenTensorDescriptor_t | wDesc, | ||
const void * | w, | ||
const miopenTensorDescriptor_t * | yDesc, | ||
void * | y, | ||
const miopenTensorDescriptor_t | hyDesc, | ||
void * | hy, | ||
const miopenTensorDescriptor_t | cyDesc, | ||
void * | cy, | ||
void * | workSpace, | ||
size_t | workSpaceNumBytes | ||
) |
Execute forward inference for RNN layer.
Interface for executing the forward inference pass on an RNN.
- Parameters
    - handle: MIOpen handle (input)
    - rnnDesc: RNN layer descriptor type (input)
    - sequenceLen: Temporal iterations to unroll (input)
    - xDesc: An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but may not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
    - x: Pointer to input tensor (input)
    - hxDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hx: Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
    - cxDesc: A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - cx: Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
    - wDesc: A weights tensor descriptor (input)
    - w: Pointer to input weights tensor (input)
    - yDesc: An array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of each element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If the direction mode is bidirectional, the second dimension is twice the hiddenSize. (input)
    - y: Pointer to output tensor (output)
    - hyDesc: A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - hy: Pointer to the hidden layer output tensor. If hy is NULL, then the final hidden state will not be saved. (output)
    - cyDesc: An output cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
    - cy: Pointer to the cell layer output tensor. If cy is NULL, then the final cell state will not be saved. (output)
    - workSpace: Pointer to memory allocated for the forward pass (input)
    - workSpaceNumBytes: Number of allocated bytes in memory for the workspace (input)
- Returns
- miopenStatus_t
◆ miopenRNNForwardTraining()
miopenStatus_t miopenRNNForwardTraining(miopenHandle_t handle,
                                        const miopenRNNDescriptor_t rnnDesc,
                                        const int sequenceLen,
                                        const miopenTensorDescriptor_t *xDesc,
                                        const void *x,
                                        const miopenTensorDescriptor_t hxDesc,
                                        const void *hx,
                                        const miopenTensorDescriptor_t cxDesc,
                                        const void *cx,
                                        const miopenTensorDescriptor_t wDesc,
                                        const void *w,
                                        const miopenTensorDescriptor_t *yDesc,
                                        void *y,
                                        const miopenTensorDescriptor_t hyDesc,
                                        void *hy,
                                        const miopenTensorDescriptor_t cyDesc,
                                        void *cy,
                                        void *workSpace,
                                        size_t workSpaceNumBytes,
                                        void *reserveSpace,
                                        size_t reserveSpaceNumBytes)
Execute forward training for a recurrent layer.
Interface for executing the forward training pass on an RNN.
- Parameters
- handle: MIOpen handle (input)
- rnnDesc: RNN layer descriptor (input)
- sequenceLen: Number of temporal iterations to unroll (input)
- xDesc: Array of tensor descriptors, one per time step, describing the input. The first dimension of each descriptor is the batch size; it may decrease from element n to element n+1 but must not increase. The second dimension is the input vector length and is the same for every descriptor in the array. (input)
- x: Pointer to the input tensor (input)
- hxDesc: Hidden-state tensor descriptor. Its first dimension is the number of layers for unidirectional mode and twice the number of layers for bidirectional mode; its second dimension must equal the largest first dimension in the xDesc array; its third dimension equals hiddenSize. (input)
- hx: Pointer to the hidden-layer input tensor. If hx is NULL, the initial hidden state is zero initialized. (input)
- cxDesc: Cell-state tensor descriptor with the same dimension requirements as hxDesc. (input)
- cx: Pointer to the cell-layer input tensor. If cx is NULL, the initial cell state is zero initialized. (input)
- wDesc: Weights tensor descriptor (input)
- w: Pointer to the weights tensor (input)
- yDesc: Array of fully packed tensor descriptors associated with the output of each time step. The first dimension of each descriptor must equal the first dimension of the first descriptor (batch size) in the xDesc array. The second dimension is hiddenSize for unidirectional mode and twice hiddenSize for bidirectional mode. (input)
- y: Pointer to the output tensor (output)
- hyDesc: Hidden-state output tensor descriptor with the same dimension requirements as hxDesc. (input)
- hy: Pointer to the hidden-layer output tensor. If hy is NULL, the final hidden state is not saved. (output)
- cyDesc: Cell-state output tensor descriptor with the same dimension requirements as hxDesc. (input)
- cy: Pointer to the cell-layer output tensor. If cy is NULL, the final cell state is not saved. (output)
- workSpace: Pointer to memory allocated for forward training (input)
- workSpaceNumBytes: Number of bytes allocated for the workspace (input)
- reserveSpace: Pointer to memory allocated for the reserve space (input/output)
- reserveSpaceNumBytes: Number of bytes allocated for the reserve space used in the forward pass (input)
- Returns
- miopenStatus_t
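As a rough sketch of the training-time call, under the same assumptions as the inference example above (handle, descriptors, and device buffers such as d_x and d_w already prepared), both the workspace and the reserve space are sized and allocated before the call; the reserve space is typically kept alive for the subsequent backward passes.

size_t wsBytes = 0, rsBytes = 0;
miopenGetRNNWorkspaceSize(handle, rnnDesc, seqLen, xDescArray, &wsBytes);
miopenGetRNNTrainingReserveSize(handle, rnnDesc, seqLen, xDescArray, &rsBytes);

void *workSpace = NULL, *reserveSpace = NULL;
hipMalloc(&workSpace, wsBytes);
hipMalloc(&reserveSpace, rsBytes);

miopenRNNForwardTraining(handle, rnnDesc, seqLen,
                         xDescArray, d_x,
                         hxDesc, d_hx,
                         cxDesc, d_cx,
                         wDesc, d_w,
                         yDescArray, d_y,
                         hyDesc, d_hy,
                         cyDesc, d_cy,
                         workSpace, wsBytes,
                         reserveSpace, rsBytes);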
◆ miopenSetRNNDataSeqTensorDescriptor()
miopenStatus_t miopenSetRNNDataSeqTensorDescriptor(miopenSeqTensorDescriptor_t seqTensorDesc,
                                                   miopenDataType_t dataType,
                                                   miopenRNNBaseLayout_t layout,
                                                   int maxSequenceLen,
                                                   int batchSize,
                                                   int vectorSize,
                                                   const int *sequenceLenArray,
                                                   void *paddingMarker)
Set shape of RNN seqData tensor.
Interface for setting the tensor shape to be used as RNN input data.
- Parameters
- seqTensorDesc: Tensor descriptor (input/output)
- dataType: MIOpen datatype (input)
- layout: One of the main supported layouts for RNN data (input)
- maxSequenceLen: Sequence length limit within this SeqTensor (input)
- batchSize: Number of sequences within this SeqTensor (input)
- vectorSize: Vector size (input)
- sequenceLenArray: Array containing the length of each sequence in the SeqTensor (input)
- paddingMarker: Not used, should be NULL (input)
- Returns
- miopenStatus_t
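As an illustration, the sketch below describes a small padded batch; the descriptor is assumed to have been created beforehand (e.g. with miopenCreateSeqTensorDescriptor()), and the batch size, sequence lengths, and vector size are arbitrary example values.

int seqLens[3] = {10, 8, 5};                     /* per-sequence lengths */
miopenSetRNNDataSeqTensorDescriptor(seqTensorDesc,
                                    miopenFloat,                  /* dataType */
                                    miopenRNNDataSeqMajorPadded,  /* layout */
                                    10,                           /* maxSequenceLen */
                                    3,                            /* batchSize */
                                    64,                           /* vectorSize */
                                    seqLens,
                                    NULL);                        /* paddingMarker: not used */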
◆ miopenSetRNNDescriptor()
miopenStatus_t miopenSetRNNDescriptor(miopenRNNDescriptor_t rnnDesc,
                                      const int hsize,
                                      const int nlayers,
                                      miopenRNNInputMode_t inMode,
                                      miopenRNNDirectionMode_t direction,
                                      miopenRNNMode_t rnnMode,
                                      miopenRNNBiasMode_t biasMode,
                                      miopenRNNAlgo_t algo,
                                      miopenDataType_t dataType)
Set the details of the RNN descriptor.
Interface for setting the values of the RNN descriptor object. This function requires specific algorithm selection.
- Parameters
- rnnDesc: RNN layer descriptor (input)
- hsize: Hidden layer size (input)
- nlayers: Number of layers (input)
- inMode: RNN first-layer input mode (input)
- direction: RNN direction (input)
- rnnMode: RNN model type (input)
- biasMode: RNN bias included (input)
- algo: RNN algorithm selected (input)
- dataType: MIOpen datatype (input)
- Returns
- miopenStatus_t
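As an illustration, the sketch below creates a descriptor and configures a hypothetical two-layer bidirectional LSTM; the hidden size and enum choices are arbitrary example values.

miopenRNNDescriptor_t rnnDesc;
miopenCreateRNNDescriptor(&rnnDesc);
miopenSetRNNDescriptor(rnnDesc,
                       512,                  /* hsize: hidden vector length */
                       2,                    /* nlayers: stacked RNN layers */
                       miopenRNNlinear,      /* inMode: full input GEMM on the first layer */
                       miopenRNNbidirection, /* direction */
                       miopenLSTM,           /* rnnMode */
                       miopenRNNwithBias,    /* biasMode */
                       miopenRNNdefault,     /* algo */
                       miopenFloat);         /* dataType */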
◆ miopenSetRNNDescriptor_V2()
miopenStatus_t miopenSetRNNDescriptor_V2(miopenRNNDescriptor_t rnnDesc,
                                         const int hsize,
                                         const int nlayers,
                                         miopenDropoutDescriptor_t dropoutDesc,
                                         miopenRNNInputMode_t inMode,
                                         miopenRNNDirectionMode_t direction,
                                         miopenRNNMode_t rnnMode,
                                         miopenRNNBiasMode_t biasMode,
                                         miopenRNNAlgo_t algo,
                                         miopenDataType_t dataType)
Set the details of the RNN descriptor version 2. This version enables the use of dropout in rnn.
Interface for setting the values of the RNN descriptor object. This function requires specific algorithm selection.
- Parameters
- rnnDesc: RNN layer descriptor (input/output)
- hsize: Hidden layer size (input)
- nlayers: Number of layers (input)
- dropoutDesc: Pre-initialized dropout descriptor for the dropout layer between RNN layers (input)
- inMode: RNN first-layer input mode (input)
- direction: RNN direction (input)
- rnnMode: RNN model type (input)
- biasMode: RNN bias included (input)
- algo: RNN algorithm selected (input)
- dataType: MIOpen datatype (input)
- Returns
- miopenStatus_t
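The only difference from the sketch above is the extra dropout descriptor; dropoutDesc here is assumed to have been created and initialized earlier through the MIOpen dropout API.

miopenSetRNNDescriptor_V2(rnnDesc,
                          512, 2,
                          dropoutDesc,          /* pre-initialized dropout descriptor */
                          miopenRNNlinear,
                          miopenRNNbidirection,
                          miopenLSTM,
                          miopenRNNwithBias,
                          miopenRNNdefault,
                          miopenFloat);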
◆ miopenSetRNNLayerBias()
miopenStatus_t miopenSetRNNLayerBias(miopenHandle_t handle,
                                     miopenRNNDescriptor_t rnnDesc,
                                     const int layer,
                                     miopenTensorDescriptor_t xDesc,
                                     miopenTensorDescriptor_t wDesc,
                                     void *w,
                                     const int biasID,
                                     miopenTensorDescriptor_t biasDesc,
                                     const void *layerBias)
Sets a bias for a specific layer in an RNN stack.
This function sets the bias data for a specific layer and bias ID.
For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), biasID == 0 sets the bias associated with the input GEMM, while biasID == 1 sets the bias associated with the hidden state GEMM.
For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, while biasID 4 to 7 refer to the biases associated with the hidden state GEMM.
- biasID 0 and 4 are for the input gate.
- biasID 1 and 5 are for the forget gate.
- biasID 2 and 6 are for the output gate.
- biasID 3 and 7 are for the new memory gate.
For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- biasID 0 and 3 are for the update gate.
- biasID 1 and 4 are for the reset gate.
- biasID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The input argument biasDesc is a previously created tensor descriptor, typically populated by first calling miopenGetRNNLayerBias().
Note: When inputSkip mode is selected there is no input-layer matrix operation, and therefore no associated memory. In this case miopenSetRNNLayerBias() will return an error status of miopenStatusBadParm for any biasID associated with the input GEMM.
- Parameters
- handle: MIOpen handle (input)
- rnnDesc: RNN layer descriptor (input)
- layer: The layer number in the RNN stack (input)
- xDesc: A tensor descriptor for the input (input)
- wDesc: A tensor descriptor for the bias tensor (input)
- w: Pointer to memory containing the bias tensor (input)
- biasID: ID of the internal bias tensor (input)
- biasDesc: Descriptor of the bias tensor (input)
- layerBias: Pointer to the memory location of the bias tensor (input)
- Returns
- miopenStatus_t
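A rough sketch of the typical pattern: populate biasDesc via miopenGetRNNLayerBias() (here with a NULL data pointer, on the assumption that only the descriptor needs populating), then write the new bias values from a hypothetical device buffer d_bias; handle, rnnDesc, xDesc, wDesc, and d_w are assumed to exist already.

miopenTensorDescriptor_t biasDesc;
miopenCreateTensorDescriptor(&biasDesc);

/* Populate biasDesc for LSTM layer 0, forget-gate bias of the input GEMM (biasID 1). */
miopenGetRNNLayerBias(handle, rnnDesc, 0, xDesc, wDesc, d_w, 1, biasDesc, NULL);

/* Copy the new bias values (already resident in d_bias) into the weight buffer. */
miopenSetRNNLayerBias(handle, rnnDesc, 0, xDesc, wDesc, d_w, 1, biasDesc, d_bias);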
◆ miopenSetRNNLayerParam()
miopenStatus_t miopenSetRNNLayerParam(miopenHandle_t handle,
                                      miopenRNNDescriptor_t rnnDesc,
                                      const int layer,
                                      miopenTensorDescriptor_t xDesc,
                                      miopenTensorDescriptor_t wDesc,
                                      void *w,
                                      const int paramID,
                                      miopenTensorDescriptor_t paramDesc,
                                      const void *layerParam)
Sets a weight matrix for a specific layer in an RNN stack.
This function sets the weight matrix data for a specific layer and parameter ID.
For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), paramID == 0 sets the weight matrix associated with the input GEMM, while paramID == 1 sets the weight matrix associated with the hidden state GEMM.
For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, while paramID 4 to 7 refer to the weight matrices associated with the hidden state GEMM.
- paramID 0 and 4 are for the input gate.
- paramID 1 and 5 are for the forget gate.
- paramID 2 and 6 are for the output gate.
- paramID 3 and 7 are for the new memory gate.
For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.
- paramID 0 and 3 are for the update gate.
- paramID 1 and 4 are for the reset gate.
- paramID 2 and 5 are for the new memory gate.
For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.
The input argument paramDesc is a previously created tensor descriptor, typically populated by first calling miopenGetRNNLayerParam().
Note: When inputSkip mode is selected there is no input-layer matrix operation, and therefore no associated memory. In this case miopenSetRNNLayerParam() will return an error status of miopenStatusBadParm for any paramID associated with the input GEMM.
- Parameters
- handle: MIOpen handle (input)
- rnnDesc: RNN layer descriptor (input)
- layer: The layer number in the RNN stack (input)
- xDesc: A tensor descriptor for the input (input)
- wDesc: A tensor descriptor for the parameter tensor (input)
- w: Pointer to memory containing the parameter tensor (input)
- paramID: ID of the internal parameter tensor (input)
- paramDesc: Descriptor of the parameter tensor (input)
- layerParam: Pointer to the memory location of the parameter tensor (input)
- Returns
- miopenStatus_t
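The same pattern applies to weight matrices; the sketch below assumes the descriptors, d_w, and a hypothetical device buffer d_param holding the new values already exist, and uses miopenGetRNNLayerParam() only to populate paramDesc.

miopenTensorDescriptor_t paramDesc;
miopenCreateTensorDescriptor(&paramDesc);

/* Populate paramDesc for GRU layer 0, update-gate matrix of the hidden state GEMM (paramID 3). */
miopenGetRNNLayerParam(handle, rnnDesc, 0, xDesc, wDesc, d_w, 3, paramDesc, NULL);

/* Overwrite that matrix with the values in d_param. */
miopenSetRNNLayerParam(handle, rnnDesc, 0, xDesc, wDesc, d_w, 3, paramDesc, d_param);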
◆ miopenSetRNNPaddingMode()
miopenStatus_t miopenSetRNNPaddingMode(miopenRNNDescriptor_t rnnDesc,
                                       miopenRNNPaddingMode_t paddingMode)
Sets the input/output data padding mode for an RNN descriptor.
This function changes the padding mode of a previously created and initialized RNN descriptor. It must be called before using miopenGetRNNWorkspaceSize() and miopenGetRNNTrainingReserveSize(). By default, unpadded data is expected at the RNN input/output.
- Parameters
- rnnDesc: RNN layer descriptor (input/output)
- paddingMode: RNN input/output data padding mode (input)
- Returns
- miopenStatus_t
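A minimal sketch of the required ordering: the padding mode is switched on the already-initialized descriptor before the size queries (handle, descriptors, and the xDescArray are assumed to exist already).

miopenSetRNNPaddingMode(rnnDesc, miopenRNNIOWithPadding);

size_t wsBytes = 0, rsBytes = 0;
miopenGetRNNWorkspaceSize(handle, rnnDesc, seqLen, xDescArray, &wsBytes);
miopenGetRNNTrainingReserveSize(handle, rnnDesc, seqLen, xDescArray, &rsBytes);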