MIOpen: RNN

Enumerations

enum  miopenRNNMode_t {
  miopenRNNRELU = 0 ,
  miopenRNNTANH = 1 ,
  miopenLSTM = 2 ,
  miopenGRU = 3
}
 
enum  miopenRNNInputMode_t {
  miopenRNNlinear = 0 ,
  miopenRNNskip = 1
}
 
enum  miopenRNNAlgo_t {
  miopenRNNdefault = 0 ,
  miopenRNNfundamental
}
 
enum  miopenRNNDirectionMode_t {
  miopenRNNunidirection = 0 ,
  miopenRNNbidirection = 1
}
 
enum  miopenRNNBiasMode_t {
  miopenRNNNoBias = 0 ,
  miopenRNNwithBias = 1
}
 
enum  miopenRNNGEMMalgoMode_t { miopenRNNAlgoGEMM = 0 }
 
enum  miopenRNNPaddingMode_t {
  miopenRNNIONotPadded = 0 ,
  miopenRNNIOWithPadding = 1
}
 
enum  miopenRNNFWDMode_t {
  miopenRNNTraining = 0 ,
  miopenRNNInference = 1
}
 
enum  miopenRNNBaseLayout_t {
  miopenRNNDataUnknownLayout = 0 ,
  miopenRNNDataSeqMajorNotPadded = 1 ,
  miopenRNNDataSeqMajorPadded = 2 ,
  miopenRNNDataBatchMajorPadded = 3
}
 

Functions

 MIOPEN_DECLARE_OBJECT (miopenRNNDescriptor)
 Creates the miopenRNNDescriptor_t type. More...
 
miopenStatus_t miopenCreateRNNDescriptor (miopenRNNDescriptor_t *rnnDesc)
 Create a RNN layer Descriptor. More...
 
miopenStatus_t miopenGetRNNDescriptor (miopenRNNDescriptor_t rnnDesc, miopenRNNMode_t *rnnMode, miopenRNNAlgo_t *algoMode, miopenRNNInputMode_t *inputMode, miopenRNNDirectionMode_t *dirMode, miopenRNNBiasMode_t *biasMode, int *hiddenSize, int *layer)
 Retrieves a RNN layer descriptor's details. More...
 
miopenStatus_t miopenGetRNNDescriptor_V2 (miopenRNNDescriptor_t rnnDesc, int *hiddenSize, int *layer, miopenDropoutDescriptor_t *dropoutDesc, miopenRNNInputMode_t *inputMode, miopenRNNDirectionMode_t *dirMode, miopenRNNMode_t *rnnMode, miopenRNNBiasMode_t *biasMode, miopenRNNAlgo_t *algoMode, miopenDataType_t *dataType)
 Retrieves an RNN layer descriptor's details (version 2). This version also retrieves information about the dropout descriptor of the RNN descriptor. More...
 
miopenStatus_t miopenDestroyRNNDescriptor (miopenRNNDescriptor_t rnnDesc)
 Destroys the RNN descriptor object. More...
 
miopenStatus_t miopenSetRNNDescriptor (miopenRNNDescriptor_t rnnDesc, const int hsize, const int nlayers, miopenRNNInputMode_t inMode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnnMode, miopenRNNBiasMode_t biasMode, miopenRNNAlgo_t algo, miopenDataType_t dataType)
 Set the details of the RNN descriptor. More...
 
miopenStatus_t miopenSetRNNDescriptor_V2 (miopenRNNDescriptor_t rnnDesc, const int hsize, const int nlayers, miopenDropoutDescriptor_t dropoutDesc, miopenRNNInputMode_t inMode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnnMode, miopenRNNBiasMode_t biasMode, miopenRNNAlgo_t algo, miopenDataType_t dataType)
 Set the details of the RNN descriptor (version 2). This version enables the use of dropout in the RNN. More...
 
miopenStatus_t miopenSetRNNDataSeqTensorDescriptor (miopenSeqTensorDescriptor_t seqTensorDesc, miopenDataType_t dataType, miopenRNNBaseLayout_t layout, int maxSequenceLen, int batchSize, int vectorSize, const int *sequenceLenArray, void *paddingMarker)
 Set shape of RNN seqData tensor. More...
 
miopenStatus_t miopenGetRNNDataSeqTensorDescriptor (miopenSeqTensorDescriptor_t seqTensorDesc, miopenDataType_t *dataType, miopenRNNBaseLayout_t *layout, int *maxSequenceLen, int *batchSize, int *vectorSize, int sequenceLenArrayLimit, int *sequenceLenArray, void *paddingMarker)
 Get shape of RNN seqData tensor. More...
 
miopenStatus_t miopenGetRNNWorkspaceSize (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, size_t *numBytes)
 Query the amount of memory required to execute the RNN layer. More...
 
miopenStatus_t miopenGetRNNTrainingReserveSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, size_t *numBytes)
 Query the amount of memory required for RNN training. More...
 
miopenStatus_t miopenGetRNNTempSpaceSizes (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenSeqTensorDescriptor_t xDesc, miopenRNNFWDMode_t fwdMode, size_t *workSpaceSize, size_t *reserveSpaceSize)
 Query the amount of additional memory required for this RNN layer execution. More...
 
miopenStatus_t miopenGetRNNParamsSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenTensorDescriptor_t xDesc, size_t *numBytes, miopenDataType_t dtype)
 Query the amount of parameter memory required for RNN training. More...
 
miopenStatus_t miopenGetRNNParamsDescriptor (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, miopenDataType_t dtype)
 Obtain a weight tensor descriptor for RNNs. More...
 
miopenStatus_t miopenGetRNNInputTensorSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int seqLen, miopenTensorDescriptor_t *xDesc, size_t *numBytes)
 Obtain the size in bytes of the RNN input tensor. More...
 
miopenStatus_t miopenGetRNNHiddenTensorSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int seqLen, miopenTensorDescriptor_t *xDesc, size_t *numBytes)
 Obtain the size in bytes of the RNN hidden tensor. More...
 
miopenStatus_t miopenGetRNNLayerParamSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int paramID, size_t *numBytes)
 Gets the number of bytes of a parameter matrix. More...
 
miopenStatus_t miopenGetRNNLayerBiasSize (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, const int biasID, size_t *numBytes)
 Gets the number of bytes of a bias. More...
 
miopenStatus_t miopenGetRNNLayerParam (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, const void *w, const int paramID, miopenTensorDescriptor_t paramDesc, void *layerParam)
 Gets a weight matrix for a specific layer in an RNN stack. More...
 
miopenStatus_t miopenGetRNNLayerBias (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, const void *w, const int biasID, miopenTensorDescriptor_t biasDesc, void *layerBias)
 Gets a bias for a specific layer in an RNN stack. More...
 
miopenStatus_t miopenGetRNNLayerParamOffset (miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int paramID, miopenTensorDescriptor_t paramDesc, size_t *layerParamOffset)
 Gets an index offset for a specific weight matrix for a layer in the RNN stack. More...
 
miopenStatus_t miopenGetRNNLayerBiasOffset (miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, const int biasID, miopenTensorDescriptor_t biasDesc, size_t *layerBiasOffset)
 Gets a bias index offset for a specific layer in an RNN stack. More...
 
miopenStatus_t miopenSetRNNLayerParam (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, void *w, const int paramID, miopenTensorDescriptor_t paramDesc, const void *layerParam)
 Sets a weight matrix for a specific layer in an RNN stack. More...
 
miopenStatus_t miopenSetRNNLayerBias (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int layer, miopenTensorDescriptor_t xDesc, miopenTensorDescriptor_t wDesc, void *w, const int biasID, miopenTensorDescriptor_t biasDesc, const void *layerBias)
 Sets a bias for a specific layer in an RNN stack. More...
 
miopenStatus_t miopenSetRNNPaddingMode (miopenRNNDescriptor_t rnnDesc, miopenRNNPaddingMode_t paddingMode)
 Sets the RNN padding mode in the RNN descriptor. More...
 
miopenStatus_t miopenGetRNNPaddingMode (miopenRNNDescriptor_t rnnDesc, miopenRNNPaddingMode_t *paddingMode)
 This function retrieves the RNN padding mode from the RNN descriptor. More...
 
miopenStatus_t miopenRNNForward (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, miopenRNNFWDMode_t fwdMode, const miopenSeqTensorDescriptor_t xDesc, const void *x, const miopenTensorDescriptor_t hDesc, const void *hx, void *hy, const miopenTensorDescriptor_t cDesc, const void *cx, void *cy, const miopenSeqTensorDescriptor_t yDesc, void *y, const void *w, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute forward training for recurrent layer. More...
 
miopenStatus_t miopenRNNBackwardSeqData (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const miopenSeqTensorDescriptor_t yDesc, const void *y, const void *dy, const miopenTensorDescriptor_t hDesc, const void *hx, const void *dhy, void *dhx, const miopenTensorDescriptor_t cDesc, const void *cx, const void *dcy, void *dcx, const miopenSeqTensorDescriptor_t xDesc, void *dx, const void *w, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute backward data for recurrent layer. More...
 
miopenStatus_t miopenRNNBackwardWeightsSeqTensor (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const miopenSeqTensorDescriptor_t xDesc, const void *x, const miopenTensorDescriptor_t hDesc, const void *hx, const miopenSeqTensorDescriptor_t yDesc, const void *y, void *dw, size_t weightSpaceSize, void *workSpace, size_t workSpaceNumBytes, const void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute backward weights for recurrent layer. More...
 
miopenStatus_t miopenRNNForwardTraining (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t *yDesc, void *y, const miopenTensorDescriptor_t hyDesc, void *hy, const miopenTensorDescriptor_t cyDesc, void *cy, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute forward training for recurrent layer. More...
 
miopenStatus_t miopenRNNBackwardData (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *yDesc, const void *y, const miopenTensorDescriptor_t *dyDesc, const void *dy, const miopenTensorDescriptor_t dhyDesc, const void *dhy, const miopenTensorDescriptor_t dcyDesc, const void *dcy, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t *dxDesc, void *dx, const miopenTensorDescriptor_t dhxDesc, void *dhx, const miopenTensorDescriptor_t dcxDesc, void *dcx, void *workSpace, size_t workSpaceNumBytes, void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute backward data for recurrent layer. More...
 
miopenStatus_t miopenRNNBackwardWeights (miopenHandle_t handle, const miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t *yDesc, const void *y, const miopenTensorDescriptor_t dwDesc, void *dw, void *workSpace, size_t workSpaceNumBytes, const void *reserveSpace, size_t reserveSpaceNumBytes)
 Execute backward weights for recurrent layer. More...
 
miopenStatus_t miopenRNNForwardInference (miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc, const int sequenceLen, const miopenTensorDescriptor_t *xDesc, const void *x, const miopenTensorDescriptor_t hxDesc, const void *hx, const miopenTensorDescriptor_t cxDesc, const void *cx, const miopenTensorDescriptor_t wDesc, const void *w, const miopenTensorDescriptor_t *yDesc, void *y, const miopenTensorDescriptor_t hyDesc, void *hy, const miopenTensorDescriptor_t cyDesc, void *cy, void *workSpace, size_t workSpaceNumBytes)
 Execute forward inference for RNN layer. More...
 

Detailed Description

Enumeration Type Documentation

◆ miopenRNNAlgo_t

Recurrent Neural Network algorithm mode

Enumerator
miopenRNNdefault 

Use dedicated gate-operation kernel for LSTM and fundamental algorithm for vanilla RNN & GRU

miopenRNNfundamental 

Implemented with basic tensor operations; supported for vanilla RNN, LSTM, and GRU


◆ miopenRNNBaseLayout_t

Data layouts for RNN operations

Enumerator
miopenRNNDataUnknownLayout 
miopenRNNDataSeqMajorNotPadded 
miopenRNNDataSeqMajorPadded 
miopenRNNDataBatchMajorPadded 

◆ miopenRNNBiasMode_t

Recurrent Neural Network add on bias

Enumerator
miopenRNNNoBias 

No Biases will be applied to GEMM operations

miopenRNNwithBias 

Biases will be applied to GEMM operations


◆ miopenRNNDirectionMode_t

Recurrent Neural Network bi-directional behavior

Enumerator
miopenRNNunidirection 

Forward in time only.

miopenRNNbidirection 

Forward and backwards in time.


◆ miopenRNNFWDMode_t

Recurrent Neural Network Training/Inference mode

Enumerator
miopenRNNTraining 

Training: forward (FWD), backward data (BWD), and backward weights (WRW)

miopenRNNInference 

Forward inference only; no back-propagation


◆ miopenRNNGEMMalgoMode_t

◆ miopenRNNInputMode_t

Recurrent Neural Network layer initial input mode

Enumerator
miopenRNNlinear 

Matrix multiplication at the input of the first layer

miopenRNNskip 

No operation is performed at the input of the first layer.


◆ miopenRNNMode_t

RNN mode selection for rnn layer preference

Enumerator
miopenRNNRELU 

RNN with ReLU activation

miopenRNNTANH 

RNN with tanh activation

miopenLSTM 

LSTM

miopenGRU 

GRU


◆ miopenRNNPaddingMode_t

Recurrent Neural Network input/output data padding mode

Enumerator
miopenRNNIONotPadded 

Not padded data at RNN input/output

miopenRNNIOWithPadding 

Padded data at RNN input/output


Function Documentation

◆ MIOPEN_DECLARE_OBJECT()

MIOPEN_DECLARE_OBJECT ( miopenRNNDescriptor  )

Creates the miopenRNNDescriptor_t type.

◆ miopenCreateRNNDescriptor()

miopenStatus_t miopenCreateRNNDescriptor ( miopenRNNDescriptor_t *  rnnDesc)

Create a RNN layer Descriptor.

API for creating an uninitialized RNN layer descriptor.

Parameters
rnnDesc - Pointer to an RNN layer descriptor type (output)
Returns
miopenStatus_t
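
As a usage illustration (not taken from the header itself), the descriptor created here is typically configured with miopenSetRNNDescriptor and released with miopenDestroyRNNDescriptor. A minimal sketch, assuming a two-layer unidirectional LSTM with a placeholder hidden size of 128 and the miopenFloat data type; return-status checks are omitted:

#include <miopen/miopen.h>

void rnn_descriptor_lifecycle(void)
{
    miopenRNNDescriptor_t rnnDesc;
    miopenCreateRNNDescriptor(&rnnDesc);

    const int hiddenSize = 128; /* placeholder hidden state width */
    const int numLayers  = 2;   /* placeholder stack depth */

    miopenSetRNNDescriptor(rnnDesc, hiddenSize, numLayers,
                           miopenRNNlinear,       /* input GEMM on the first layer */
                           miopenRNNunidirection, /* forward in time only */
                           miopenLSTM,            /* cell type */
                           miopenRNNwithBias,     /* apply biases in GEMM operations */
                           miopenRNNdefault,      /* algorithm mode */
                           miopenFloat);          /* data type */

    /* ... use the descriptor with the query and execution calls below ... */

    miopenDestroyRNNDescriptor(rnnDesc);
}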

◆ miopenDestroyRNNDescriptor()

miopenStatus_t miopenDestroyRNNDescriptor ( miopenRNNDescriptor_t  rnnDesc)

Destroys the RNN descriptor object.

Parameters
rnnDesc - RNN descriptor type (input)
Returns
miopenStatus_t

◆ miopenGetRNNDataSeqTensorDescriptor()

miopenStatus_t miopenGetRNNDataSeqTensorDescriptor ( miopenSeqTensorDescriptor_t  seqTensorDesc,
miopenDataType_t *  dataType,
miopenRNNBaseLayout_t *  layout,
int *  maxSequenceLen,
int *  batchSize,
int *  vectorSize,
int  sequenceLenArrayLimit,
int *  sequenceLenArray,
void *  paddingMarker 
)

Get shape of RNN seqData tensor.

Interface for querying the shape of a tensor used as RNN sequence data

Parameters
seqTensorDesc - Tensor descriptor (input)
dataType - MIOpen datatype (output)
layout - One of the main supported layouts for RNN data (output)
maxSequenceLen - Sequence length limit within this SeqTensor (output)
batchSize - Number of sequences within this SeqTensor (output)
vectorSize - Vector size (output)
sequenceLenArrayLimit - Limit for number of elements that can be returned to user by sequenceLenArray (input)
sequenceLenArray - Array containing the length of each sequence in the SeqTensor. This is allowed to be a NULL pointer if sequenceLenArrayLimit is 0 (output)
paddingMarker - Not used, should be NULL (input)
Returns
miopenStatus_t
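
A minimal sketch pairing the set and get calls for this descriptor, assuming the miopenSeqTensorDescriptor_t itself has already been created (the SeqTensor create/destroy API is documented outside this section) and using placeholder sizes; status checks are omitted:

#include <stddef.h>
#include <miopen/miopen.h>

void seq_tensor_shape_example(miopenSeqTensorDescriptor_t seqDesc)
{
    /* Placeholder shape: batch of 4 padded sequences, 64-wide vectors. */
    int seqLens[4] = {20, 18, 15, 9};
    miopenSetRNNDataSeqTensorDescriptor(seqDesc, miopenFloat,
                                        miopenRNNDataBatchMajorPadded,
                                        20 /*maxSequenceLen*/, 4 /*batchSize*/,
                                        64 /*vectorSize*/, seqLens,
                                        NULL /*paddingMarker: not used*/);

    /* Read the shape back; sequenceLenArrayLimit is the capacity of lensOut. */
    miopenDataType_t dtype;
    miopenRNNBaseLayout_t layout;
    int maxLen, batch, vec, lensOut[4];
    miopenGetRNNDataSeqTensorDescriptor(seqDesc, &dtype, &layout,
                                        &maxLen, &batch, &vec,
                                        4, lensOut, NULL);
}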

◆ miopenGetRNNDescriptor()

miopenStatus_t miopenGetRNNDescriptor ( miopenRNNDescriptor_t  rnnDesc,
miopenRNNMode_t *  rnnMode,
miopenRNNAlgo_t *  algoMode,
miopenRNNInputMode_t *  inputMode,
miopenRNNDirectionMode_t *  dirMode,
miopenRNNBiasMode_t *  biasMode,
int *  hiddenSize,
int *  layer 
)

Retrieves a RNN layer descriptor's details.

Parameters
rnnDesc - RNN layer descriptor (input)
rnnMode - RNN mode (output)
algoMode - RNN algorithm mode (output)
inputMode - RNN data input mode (output)
dirMode - Uni or bi direction mode (output)
biasMode - Bias used (output)
hiddenSize - Size of hidden state (output)
layer - Number of stacked layers (output)
Returns
miopenStatus_t

◆ miopenGetRNNDescriptor_V2()

miopenStatus_t miopenGetRNNDescriptor_V2 ( miopenRNNDescriptor_t  rnnDesc,
int *  hiddenSize,
int *  layer,
miopenDropoutDescriptor_t *  dropoutDesc,
miopenRNNInputMode_t *  inputMode,
miopenRNNDirectionMode_t *  dirMode,
miopenRNNMode_t *  rnnMode,
miopenRNNBiasMode_t *  biasMode,
miopenRNNAlgo_t *  algoMode,
miopenDataType_t *  dataType 
)

Retrieves an RNN layer descriptor's details (version 2). This version also retrieves information about the dropout descriptor of the RNN descriptor.

Parameters
rnnDesc - RNN layer descriptor (input)
hiddenSize - Size of hidden state (output)
layer - Number of stacked layers (output)
dropoutDesc - Pre-configured dropout descriptor for dropout layer in between RNN layers (output)
inputMode - RNN data input mode (output)
dirMode - Uni or bi direction mode (output)
rnnMode - RNN mode (output)
biasMode - Bias used (output)
algoMode - RNN algorithm mode (output)
dataType - Data type of RNN (output)
Returns
miopenStatus_t

◆ miopenGetRNNHiddenTensorSize()

miopenStatus_t miopenGetRNNHiddenTensorSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  seqLen,
miopenTensorDescriptor_t *  xDesc,
size_t *  numBytes 
)

Obtain the size in bytes of the RNN hidden tensor.

This function determines the size in bytes of the allocation needed for the hidden tensor over all layers.

Parameters
handle - MIOpen handle (input)
rnnDesc - Fully populated RNN layer descriptor type (input)
seqLen - Number of iteration unrolls (input)
xDesc - An array of previously populated tensor descriptors (input)
numBytes - Number of bytes required for the hidden tensor (output)
Returns
miopenStatus_t

◆ miopenGetRNNInputTensorSize()

miopenStatus_t miopenGetRNNInputTensorSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  seqLen,
miopenTensorDescriptor_t *  xDesc,
size_t *  numBytes 
)

Obtain the size in bytes of the RNN input tensor.

This function determines the size in bytes of the allocation needed for the input data tensor for an RNN layer. The number of bytes is derived from the array of tensor descriptors.

Parameters
handle - MIOpen handle (input)
rnnDesc - Fully populated RNN layer descriptor (input)
seqLen - Number of iteration unrolls (input)
xDesc - An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size and may decrease from element n to element n+1 and not increase in size. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
numBytes - Number of bytes required for the input tensor (output)
Returns
miopenStatus_t

◆ miopenGetRNNLayerBias()

miopenStatus_t miopenGetRNNLayerBias ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
miopenTensorDescriptor_t  wDesc,
const void *  w,
const int  biasID,
miopenTensorDescriptor_t  biasDesc,
void *  layerBias 
)

Gets a bias for a specific layer in an RNN stack.

This function retrieves the bias data for a specific layer and bias ID and copies the data into previously allocated device memory.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), biasID == 0 retrieves the bias associated with the input GEMM, while biasID == 1 retrieves the bias associated with the hidden state GEMM.

For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, and biasID 4 to 7 refer to the biases associated with the hidden state GEMM.

  • biasID 0 and 4 are for the input gate.
  • biasID 1 and 5 are for the forget gate.
  • biasID 2 and 6 are for the output gate.
  • biasID 3 and 7 are for the new memory gate.

For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • biasID 0 and 3 are for the update gate.
  • biasID 1 and 4 are for the reset gate.
  • biasID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

The output argument biasDesc is a previously created tensor descriptor that is populated to describe the memory layout of the bias. It is fully packed and is used when calling miopenSetRNNLayerBias().

The argument layerBias should either be nullptr, or have device memory allocated to allow copying of the entire layer bias into it. If layerBias is nullptr then only the biasDesc is populated and returned. The size in bytes of the layer bias can be determined by using miopenGetRNNLayerBiasSize().

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerBias() will return an error status, miopenStatusBadParm, for a biasID associated with the input GEMM.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
wDesc - A tensor descriptor to the parameter tensor (input)
w - Pointer to memory containing parameter tensor (input)
biasID - ID of the internal parameter tensor (input)
biasDesc - Descriptor of the parameter tensor (output)
layerBias - Pointer to the memory location of the bias tensor (output)
Returns
miopenStatus_t

◆ miopenGetRNNLayerBiasOffset()

miopenStatus_t miopenGetRNNLayerBiasOffset ( miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
const int  biasID,
miopenTensorDescriptor_t  biasDesc,
size_t *  layerBiasOffset 
)

Gets a bias index offset for a specific layer in an RNN stack.

This function retrieves the bias index offset for a specific layer and bias ID.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), biasID == 0 retrieves the offset of the bias associated with the input GEMM, while biasID == 1 retrieves the offset of the bias associated with the hidden state GEMM.

For miopenLSTM, biasID 0 to 3 refer to the bias offsets associated with the input GEMM, and biasID 4 to 7 are the bias offsets associated with the hidden state GEMM.

  • biasID 0 and 4 are for the input gate.
  • biasID 1 and 5 are for the forget gate.
  • biasID 2 and 6 are for the output gate.
  • biasID 3 and 7 are for the new memory gate.

For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • biasID 0 and 3 are for the update gate.
  • biasID 1 and 4 are for the reset gate.
  • biasID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

The output argument biasDesc is a previously created tensor descriptor that is populated to describe the memory layout of the bias. It is fully packed and is used when calling miopenSetRNNLayerBias().

The argument layerBiasOffset should either be nullptr, or point to an output address. If layerBiasOffset is nullptr then only the biasDesc is populated and returned.

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerBiasOffset() will return an error status, miopenStatusBadParm, for a biasID associated with the input GEMM.

Parameters
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
biasID - ID of the internal parameter tensor (input)
biasDesc - Descriptor of the parameter tensor (output)
layerBiasOffset - Location for the bias offset (output)
Returns
miopenStatus_t

◆ miopenGetRNNLayerBiasSize()

miopenStatus_t miopenGetRNNLayerBiasSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
const int  biasID,
size_t *  numBytes 
)

Gets the number of bytes of a bias.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), biasID == 0 retrieves the size of the bias associated with the input GEMM, while biasID == 1 retrieves the size of the bias associated with the hidden state GEMM.

For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, and biasID 4 to 7 refer to the biases associated with the hidden state GEMM.

  • biasID 0 and 4 are for the input gate.
  • biasID 1 and 5 are for the forget gate.
  • biasID 2 and 6 are for the output gate.
  • biasID 3 and 7 are for the new memory gate.

For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • biasID 0 and 3 are for the update gate.
  • biasID 1 and 4 are for the reset gate.
  • biasID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
biasID - ID of the internal parameter tensor (input)
numBytes - The number of bytes of the layer's bias (output)
Returns
miopenStatus_t

◆ miopenGetRNNLayerParam()

miopenStatus_t miopenGetRNNLayerParam ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
miopenTensorDescriptor_t  wDesc,
const void *  w,
const int  paramID,
miopenTensorDescriptor_t  paramDesc,
void *  layerParam 
)

Gets a weight matrix for a specific layer in an RNN stack.

This function retrieves the weight matrix data for a specific layer and parameter ID and copies the data into previously allocated device memory.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), paramID == 0 retrieves the weight matrix associated with the input GEMM, while paramID == 1 retrieves the weight matrix associated with the hidden state GEMM.

For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, and paramID 4 to 7 refer to the weight matrices associated with the hidden state GEMM.

  • paramID 0 and 4 are for the input gate.
  • paramID 1 and 5 are for the forget gate.
  • paramID 2 and 6 are for the output gate.
  • paramID 3 and 7 are for the new memory gate.

For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • paramID 0 and 3 are for the update gate.
  • paramID 1 and 4 are for the reset gate.
  • paramID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

The output argument paramDesc is a previously created tensor descriptor that is populated to describe the memory layout of the parameter matrix. It is fully packed and is used when calling miopenSetRNNLayerParam().

The argument layerParam should either be nullptr, or have device memory allocated to allow copying of the entire layer parameter matrix into it. If layerParam is nullptr then only the paramDesc is populated and returned. The size in bytes of the layer parameter matrix can be determined by using miopenGetRNNLayerParamSize().

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerParam() will return an error status, miopenStatusBadParm, for a paramID associated with the input GEMM.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
wDesc - A tensor descriptor to the parameter tensor (input)
w - Pointer to memory containing parameter tensor (input)
paramID - ID of the internal parameter tensor (input)
paramDesc - Tensor descriptor for the fully packed output parameter tensor (output)
layerParam - Pointer to the memory location of the parameter tensor (output)
Returns
miopenStatus_t
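
A minimal sketch of pulling one gate's weight matrix out of the packed weight buffer w, assuming the handle, descriptors, and device buffer already exist; miopenCreateTensorDescriptor and hipMalloc come from the tensor API and the HIP runtime, which are documented outside this section, and status checks are omitted:

#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

void copy_out_layer0_input_gate(miopenHandle_t handle,
                                miopenRNNDescriptor_t rnnDesc,
                                miopenTensorDescriptor_t xDesc,
                                miopenTensorDescriptor_t wDesc,
                                const void *w)
{
    const int layer = 0, paramID = 0; /* layer 0, input-gate weights of an LSTM */

    size_t numBytes = 0;
    miopenGetRNNLayerParamSize(handle, rnnDesc, layer, xDesc, paramID, &numBytes);

    void *layerParam = NULL;
    hipMalloc(&layerParam, numBytes); /* device memory for the copied matrix */

    miopenTensorDescriptor_t paramDesc;
    miopenCreateTensorDescriptor(&paramDesc); /* populated by the call below */

    miopenGetRNNLayerParam(handle, rnnDesc, layer, xDesc, wDesc, w,
                           paramID, paramDesc, layerParam);

    /* ... read or copy layerParam, then hipFree it and destroy paramDesc ... */
}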

◆ miopenGetRNNLayerParamOffset()

miopenStatus_t miopenGetRNNLayerParamOffset ( miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
const int  paramID,
miopenTensorDescriptor_t  paramDesc,
size_t *  layerParamOffset 
)

Gets an index offset for a specific weight matrix for a layer in the RNN stack.

This function retrieves the index offset for a weight matrix in a layer.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), paramID == 0 retrieves the weight matrix offset associated with the input GEMM, while paramID == 1 retrieves the weight matrix offset associated with the hidden state GEMM.

For miopenLSTM, paramID 0 to 3 refer to the weight matrix offsets associated with the input GEMM, and paramID 4 to 7 are the weight matrix offsets associated with the hidden state GEMM.

  • paramID 0 and 4 are for the input gate.
  • paramID 1 and 5 are for the forget gate.
  • paramID 2 and 6 are for the output gate.
  • paramID 3 and 7 are for the new memory gate.

For miopenGRU paramID 0 to 2 refer to the weight matrix offset associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • paramID 0 and 3 are for the update gate.
  • paramID 1 and 4 are for the reset gate.
  • paramID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

The output argument paramDesc is a previously created tensor descriptor that is populated to describe the memory layout of the parameter matrix. It is fully packed and is used when calling miopenSetRNNLayerParam().

The argument layerParamOffset should either be nullptr, or an address to place the offset. If layerParamOffset is nullptr then only the paramDesc is populated and returned.

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenGetRNNLayerParamOffset() will return an error status, miopenStatusBadParm, for a paramID associated with the input GEMM.

Parameters
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
paramID - ID of the internal parameter tensor (input)
paramDesc - Tensor descriptor for the fully packed output parameter tensor (output)
layerParamOffset - Location for the parameter offset (output)
Returns
miopenStatus_t

◆ miopenGetRNNLayerParamSize()

miopenStatus_t miopenGetRNNLayerParamSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
const int  paramID,
size_t *  numBytes 
)

Gets the number of bytes of a parameter matrix.

For vanilla RNNs (miopenRNNRELU and miopenRNNTANH), paramID == 0 retrieves the size of the weight matrix associated with the input GEMM, while paramID == 1 retrieves the size of the weight matrix associated with the hidden state GEMM.

For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, and paramID 4 to 7 refer to the weight matrices associated with the hidden state GEMM.

  • paramID 0 and 4 are for the input gate.
  • paramID 1 and 5 are for the forget gate.
  • paramID 2 and 6 are for the output gate.
  • paramID 3 and 7 are for the new memory gate.

For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • paramID 0 and 3 are for the update gate.
  • paramID 1 and 4 are for the reset gate.
  • paramID 2 and 5 are for the new memory gate.

For bi-directional RNNs the backwards in time direction is numbered as the layer directly after the forward in time direction.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
paramID - ID of the internal parameter tensor (input)
numBytes - The number of bytes of the layer's parameter matrix (output)
Returns
miopenStatus_t

◆ miopenGetRNNPaddingMode()

miopenStatus_t miopenGetRNNPaddingMode ( miopenRNNDescriptor_t  rnnDesc,
miopenRNNPaddingMode_t *  paddingMode 
)

This function retrieves the RNN padding mode from the RNN descriptor.

Parameters
rnnDesc - RNN layer descriptor type (input)
paddingMode - Pointer to the RNN padding mode (output)
Returns
miopenStatus_t
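
A minimal sketch of the set/get pair for the padding mode, assuming an already-created RNN descriptor; status checks are omitted:

#include <miopen/miopen.h>

void padding_mode_example(miopenRNNDescriptor_t rnnDesc)
{
    /* Enable padded input/output sequences for this descriptor. */
    miopenSetRNNPaddingMode(rnnDesc, miopenRNNIOWithPadding);

    /* Read the setting back; mode is now miopenRNNIOWithPadding. */
    miopenRNNPaddingMode_t mode;
    miopenGetRNNPaddingMode(rnnDesc, &mode);
}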

◆ miopenGetRNNParamsDescriptor()

miopenStatus_t miopenGetRNNParamsDescriptor ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
miopenTensorDescriptor_t  xDesc,
miopenTensorDescriptor_t  wDesc,
miopenDataType_t  dtype 
)

Obtain a weight tensor descriptor for RNNs.

This function populates a weight descriptor that describes the memory layout of the weight matrix.

Parameters
handle - MIOpen handle (input)
rnnDesc - Fully populated RNN layer descriptor type (input)
xDesc - A previously populated tensor descriptor (input)
wDesc - A previously allocated tensor descriptor (output)
dtype - MIOpen data type enum (input)
Returns
miopenStatus_t

◆ miopenGetRNNParamsSize()

miopenStatus_t miopenGetRNNParamsSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
miopenTensorDescriptor_t  xDesc,
size_t *  numBytes,
miopenDataType_t  dtype 
)

Query the amount of parameter memory required for RNN training.

This function calculates the amount of parameter memory required to train the RNN layer given an RNN descriptor and a tensor descriptor.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
xDesc - A tensor descriptor (input)
numBytes - Number of bytes required for RNN layer execution (output)
dtype - MIOpen data type enum (input)
Returns
miopenStatus_t
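
A minimal sketch of sizing and allocating the packed weight buffer from this query, assuming an existing handle, RNN descriptor, and input tensor descriptor; hipMalloc is from the HIP runtime and status checks are omitted:

#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

void allocate_rnn_weights(miopenHandle_t handle,
                          miopenRNNDescriptor_t rnnDesc,
                          miopenTensorDescriptor_t xDesc,
                          void **w, size_t *weightBytes)
{
    /* Query how large the packed weight buffer must be, then allocate it. */
    miopenGetRNNParamsSize(handle, rnnDesc, xDesc, weightBytes, miopenFloat);
    hipMalloc(w, *weightBytes);
}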

◆ miopenGetRNNTempSpaceSizes()

miopenStatus_t miopenGetRNNTempSpaceSizes ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
miopenSeqTensorDescriptor_t  xDesc,
miopenRNNFWDMode_t  fwdMode,
size_t *  workSpaceSize,
size_t *  reserveSpaceSize 
)

Query the amount of additional memory required for this RNN layer execution.

This function calculates the size of the extra buffers required, which depends on the layer configuration: the RNN descriptor, the forward mode (fwdMode), and the data descriptor. If fwdMode is miopenRNNInference, reserveSpaceSize is always zero, because the reserve-space buffer is not used during inference.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
xDesc - Sequence data tensor descriptor (input)
fwdMode - Specifies in which mode the buffers will be used (input)
workSpaceSize - Minimum WorkSpace buffer size required for RNN layer execution (output)
reserveSpaceSize - Minimum ReserveSpace buffer size required for RNN layer execution (output)
Returns
miopenStatus_t
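
A minimal sketch of sizing and allocating the workspace and reserve-space buffers for a training run with the SeqTensor API, assuming an existing handle, RNN descriptor, and sequence data descriptor; hipMalloc is from the HIP runtime and status checks are omitted:

#include <hip/hip_runtime.h>
#include <miopen/miopen.h>

void allocate_rnn_temp_space(miopenHandle_t handle,
                             miopenRNNDescriptor_t rnnDesc,
                             miopenSeqTensorDescriptor_t xDesc,
                             void **workSpace, size_t *workBytes,
                             void **reserveSpace, size_t *reserveBytes)
{
    miopenGetRNNTempSpaceSizes(handle, rnnDesc, xDesc, miopenRNNTraining,
                               workBytes, reserveBytes);
    hipMalloc(workSpace, *workBytes);
    hipMalloc(reserveSpace, *reserveBytes); /* size is zero in inference mode */
}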

◆ miopenGetRNNTrainingReserveSize()

miopenStatus_t miopenGetRNNTrainingReserveSize ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  xDesc,
size_t *  numBytes 
)

Query the amount of memory required for RNN training.

This function calculates the amount of memory required to train the RNN layer given an RNN descriptor and a tensor descriptor.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
sequenceLen - Number of iteration unrolls (input)
xDesc - An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size and may decrease from element n to element n+1 and not increase in size. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
numBytes - Number of bytes required for RNN layer execution (output)
Returns
miopenStatus_t

◆ miopenGetRNNWorkspaceSize()

miopenStatus_t miopenGetRNNWorkspaceSize ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  xDesc,
size_t *  numBytes 
)

Query the amount of memory required to execute the RNN layer.

This function calculates the amount of memory required to run the RNN layer given an RNN descriptor and a tensor descriptor.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
sequenceLen - Number of iteration unrolls (input)
xDesc - An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size and may decrease from element n to element n+1 and not increase in size. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
numBytes - Number of bytes required for RNN layer execution (output)
Returns
miopenStatus_t

◆ miopenRNNBackwardData()

miopenStatus_t miopenRNNBackwardData ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  yDesc,
const void *  y,
const miopenTensorDescriptor_t *  dyDesc,
const void *  dy,
const miopenTensorDescriptor_t  dhyDesc,
const void *  dhy,
const miopenTensorDescriptor_t  dcyDesc,
const void *  dcy,
const miopenTensorDescriptor_t  wDesc,
const void *  w,
const miopenTensorDescriptor_t  hxDesc,
const void *  hx,
const miopenTensorDescriptor_t  cxDesc,
const void *  cx,
const miopenTensorDescriptor_t *  dxDesc,
void *  dx,
const miopenTensorDescriptor_t  dhxDesc,
void *  dhx,
const miopenTensorDescriptor_t  dcxDesc,
void *  dcx,
void *  workSpace,
size_t  workSpaceNumBytes,
void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute backward data for recurrent layer.

Interface for executing the backward data pass on a RNN.

Parameters
handleMIOpen handle (input)
rnnDescRNN layer descriptor type (input)
sequenceLenTemporal iterations to unroll (input)
yDescAn array of tensor descriptors (input)
yPointer to input tensor (input)
dyDescAn array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of the element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If direction mode is bidirectional the second dimension is twice the hiddenSize. (input)
dyPointer to the hidden layer input tensor (input)
dhyDescA hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
dhyPointer to the cell layer input tensor (input)
dcyDescA cell tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
dcyPointer to the cell layer input tensor. If dcy is NULL, then the initial delta cell state will be zero initialized. (input)
wDescA weights tensor descriptor (input)
wPointer to input weights tensor (input)
hxDescAn input hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hxPointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
cxDescA input cell tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cxPointer to the hidden layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
dxDescAn array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size and may decrease from element n to element n+1 and not increase in size. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
dxPointer to the cell layer output tensor (output)
dhxDescA hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
dhxPointer to the delta hidden layer output tensor. If dhx is NULL the hidden gradient will not be output. (output)
dcxDescA tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
dcxPointer to the cell layer output tensor. If dcx is NULL the cell gradient will not be output. (output)
workSpacePointer to memory allocated for forward training (input)
workSpaceNumBytesNumber of allocated bytes in memory for the workspace (input)
reserveSpacePointer to memory allocated for random states (input / output)
reserveSpaceNumBytesNumber of allocated bytes in memory for use in the forward (input)
Returns
miopenStatus_t

◆ miopenRNNBackwardSeqData()

miopenStatus_t miopenRNNBackwardSeqData ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const miopenSeqTensorDescriptor_t  yDesc,
const void *  y,
const void *  dy,
const miopenTensorDescriptor_t  hDesc,
const void *  hx,
const void *  dhy,
void *  dhx,
const miopenTensorDescriptor_t  cDesc,
const void *  cx,
const void *  dcy,
void *  dcx,
const miopenSeqTensorDescriptor_t  xDesc,
void *  dx,
const void *  w,
size_t  weightSpaceSize,
void *  workSpace,
size_t  workSpaceNumBytes,
void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute backward data for recurrent layer.

Interface for executing the backward data pass on a RNN.

Parameters
handleMIOpen handle (input)
rnnDescRNN layer descriptor type (input)
yDescAn output tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
yPointer to input tensor (input)
dyPointer to the hidden layer input tensor (input)
hDescAn input hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hxPointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
dhyPointer to the cell layer input tensor (input)
dhxPointer to the delta hidden layer output tensor. If dhx is NULL the hidden gradient will not be output. (output)
cDescA input cell tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cxPointer to the hidden layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
dcyPointer to the cell layer input tensor. If dcy is NULL, then the initial delta cell state will be zero initialized. (input)
dcxPointer to the cell layer output tensor. If dcx is NULL the cell gradient will not be output. (output)
xDescAn input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
dxPointer to the cell layer output tensor (output)
wPointer to input weights tensor (input)
weightSpaceSizeNumber of allocated bytes in memory for the weights tensor
workSpacePointer to memory allocated for forward training (input)
workSpaceNumBytesNumber of allocated bytes in memory for the workspace (input)
reserveSpacePointer to memory allocated for random states (input / output)
reserveSpaceNumBytesNumber of allocated bytes in memory for use in the forward (input)
Returns
miopenStatus_t

◆ miopenRNNBackwardWeights()

miopenStatus_t miopenRNNBackwardWeights ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  xDesc,
const void *  x,
const miopenTensorDescriptor_t  hxDesc,
const void *  hx,
const miopenTensorDescriptor_t *  yDesc,
const void *  y,
const miopenTensorDescriptor_t  dwDesc,
void *  dw,
void *  workSpace,
size_t  workSpaceNumBytes,
const void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute backward weights for recurrent layer.

Interface for executing the backward weights pass on a RNN.

Parameters
handleMIOpen handle (input)
rnnDescRNN layer descriptor type (input)
sequenceLenTemporal iterations to unroll (input)
xDescAn array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size and may decrease from element n to element n+1 and not increase in size. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
xPointer to input tensor (input)
hxDescA hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hxPointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
yDescAn array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of the element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If direction mode is bidirectional the second dimension is twice the hiddenSize. (input)
yPointer to the output tensor (input)
dwDescA weights tensor descriptor (input)
dwPointer to input weights tensor (input / output)
workSpacePointer to memory allocated for forward training (input)
workSpaceNumBytesNumber of allocated bytes in memory for the workspace (input)
reserveSpacePointer to memory allocated for random states (input)
reserveSpaceNumBytesNumber of allocated bytes in memory for use in the forward (input)
Returns
miopenStatus_t

◆ miopenRNNBackwardWeightsSeqTensor()

miopenStatus_t miopenRNNBackwardWeightsSeqTensor ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const miopenSeqTensorDescriptor_t  xDesc,
const void *  x,
const miopenTensorDescriptor_t  hDesc,
const void *  hx,
const miopenSeqTensorDescriptor_t  yDesc,
const void *  y,
void *  dw,
size_t  weightSpaceSize,
void *  workSpace,
size_t  workSpaceNumBytes,
const void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute backward weights for recurrent layer.

Interface for executing the backward weights pass on a RNN.

Parameters
handleMIOpen handle (input)
rnnDescRNN layer descriptor type (input)
xDescAn input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
xPointer to input tensor (input)
hDescA hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hxPointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
yDescAn output tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
yPointer to the output tensor (input)
dwPointer to input weights tensor (input / output)
weightSpaceSizeNumber of allocated bytes in memory for the weights tensor
workSpacePointer to memory allocated for forward training (input)
workSpaceNumBytesNumber of allocated bytes in memory for the workspace (input)
reserveSpacePointer to memory allocated for random states (input)
reserveSpaceNumBytesNumber of allocated bytes in memory for use in the forward (input)
Returns
miopenStatus_t
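
Taken together with miopenRNNForward below, the two SeqTensor backward calls form the backward half of a training step: data gradients first, then weight gradients, reusing the workspace and reserve space filled during the forward training pass. A minimal sketch, assuming every descriptor and device buffer has already been created and sized as in the earlier sketches (status checks omitted):

#include <miopen/miopen.h>

void rnn_backward_step(miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc,
                       miopenSeqTensorDescriptor_t xDesc, const void *x, void *dx,
                       miopenSeqTensorDescriptor_t yDesc, const void *y, const void *dy,
                       miopenTensorDescriptor_t hDesc, const void *hx, const void *dhy, void *dhx,
                       miopenTensorDescriptor_t cDesc, const void *cx, const void *dcy, void *dcx,
                       const void *w, void *dw, size_t weightSpaceSize,
                       void *workSpace, size_t workSpaceNumBytes,
                       void *reserveSpace, size_t reserveSpaceNumBytes)
{
    /* Gradients with respect to the data: dx, dhx, dcx. */
    miopenRNNBackwardSeqData(handle, rnnDesc,
                             yDesc, y, dy,
                             hDesc, hx, dhy, dhx,
                             cDesc, cx, dcy, dcx,
                             xDesc, dx,
                             w, weightSpaceSize,
                             workSpace, workSpaceNumBytes,
                             reserveSpace, reserveSpaceNumBytes);

    /* Gradients with respect to the packed weight buffer: dw. */
    miopenRNNBackwardWeightsSeqTensor(handle, rnnDesc,
                                      xDesc, x,
                                      hDesc, hx,
                                      yDesc, y,
                                      dw, weightSpaceSize,
                                      workSpace, workSpaceNumBytes,
                                      reserveSpace, reserveSpaceNumBytes);
}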

◆ miopenRNNForward()

miopenStatus_t miopenRNNForward ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
miopenRNNFWDMode_t  fwdMode,
const miopenSeqTensorDescriptor_t  xDesc,
const void *  x,
const miopenTensorDescriptor_t  hDesc,
const void *  hx,
void *  hy,
const miopenTensorDescriptor_t  cDesc,
const void *  cx,
void *  cy,
const miopenSeqTensorDescriptor_t  yDesc,
void *  y,
const void *  w,
size_t  weightSpaceSize,
void *  workSpace,
size_t  workSpaceNumBytes,
void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute forward training for recurrent layer.

Interface for executing the forward training / inference pass on a RNN.

Parameters
handleMIOpen handle (input)
rnnDescRNN layer descriptor type (input)
fwdModeSpecifies in which mode the buffers will be used.
xDescAn input tensor descriptor for sequenced RNN data. This miopenSeqTensorDescriptor_t should be initialized by the miopenSetRNNDataSeqTensorDescriptor function. (input)
xPointer to input tensor (input)
hDescA hidden tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hxPointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
hyPointer to the hidden layer output tensor. If hy is NULL, then the final hidden state will not be saved. (output)
cDescA cell tensor descriptor that has as its first dimension of the number of layers if the direction mode is unidirectional and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cxPointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
cyPointer to the cell layer output tensor. If cy is NULL, then the final cell state will not be saved. (output)
yDescAn array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of the element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If direction mode is bidirectional the second dimension is twice the hiddenSize. (input)
yPointer to output tensor (output)
wPointer to input weights tensor (input)
weightSpaceSizeNumber of allocated bytes in memory for the weights tensor
workSpacePointer to memory allocated for forward (input / output)
workSpaceNumBytesNumber of allocated bytes in memory for the workspace (input)
reserveSpacePointer to memory allocated for hidden states used during training (input / output)
reserveSpaceNumBytesNumber of allocated bytes in memory for use in the forward (input)
Returns
miopenStatus_t
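
A minimal sketch of the forward call itself in training mode, with every descriptor and buffer assumed to have been created and sized as in the earlier sketches; for inference, miopenRNNInference would be passed instead and, per miopenGetRNNTempSpaceSizes above, the reserve-space size is then zero (status checks omitted):

#include <miopen/miopen.h>

void rnn_forward_training_step(miopenHandle_t handle, miopenRNNDescriptor_t rnnDesc,
                               miopenSeqTensorDescriptor_t xDesc, const void *x,
                               miopenSeqTensorDescriptor_t yDesc, void *y,
                               miopenTensorDescriptor_t hDesc, const void *hx, void *hy,
                               miopenTensorDescriptor_t cDesc, const void *cx, void *cy,
                               const void *w, size_t weightSpaceSize,
                               void *workSpace, size_t workSpaceNumBytes,
                               void *reserveSpace, size_t reserveSpaceNumBytes)
{
    miopenRNNForward(handle, rnnDesc, miopenRNNTraining,
                     xDesc, x,
                     hDesc, hx, hy,   /* hidden state: initial in, final out */
                     cDesc, cx, cy,   /* cell state (LSTM): initial in, final out */
                     yDesc, y,
                     w, weightSpaceSize,
                     workSpace, workSpaceNumBytes,
                     reserveSpace, reserveSpaceNumBytes);
}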

◆ miopenRNNForwardInference()

miopenStatus_t miopenRNNForwardInference ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  xDesc,
const void *  x,
const miopenTensorDescriptor_t  hxDesc,
const void *  hx,
const miopenTensorDescriptor_t  cxDesc,
const void *  cx,
const miopenTensorDescriptor_t  wDesc,
const void *  w,
const miopenTensorDescriptor_t *  yDesc,
void *  y,
const miopenTensorDescriptor_t  hyDesc,
void *  hy,
const miopenTensorDescriptor_t  cyDesc,
void *  cy,
void *  workSpace,
size_t  workSpaceNumBytes 
)

Execute forward inference for RNN layer.

Interface for executing the forward inference pass on an RNN.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
sequenceLen - Temporal iterations to unroll (input)
xDesc - An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
x - Pointer to input tensor (input)
hxDesc - A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hx - Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
cxDesc - A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cx - Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
wDesc - A weights tensor descriptor (input)
w - Pointer to input weights tensor (input)
yDesc - An array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of each element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If the direction mode is bidirectional, the second dimension is twice the hiddenSize. (input)
y - Pointer to output tensor (output)
hyDesc - A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hy - Pointer to the hidden layer output tensor. If hy is NULL, then the final hidden state will not be saved. (output)
cyDesc - An output cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cy - Pointer to the cell layer output tensor. If cy is NULL, then the final cell state will not be saved. (output)
workSpace - Pointer to memory allocated for the forward pass (input)
workSpaceNumBytes - Number of allocated bytes in memory for the workspace (input)
Returns
miopenStatus_t
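
As a sketch of how the per-time-step descriptor arrays for this call might be prepared, the snippet below builds packed 2-D descriptors with the generic tensor-descriptor calls. handle, rnnDesc, the state descriptors (hxDesc, cxDesc, hyDesc, cyDesc), wDesc, the device buffers, and the workspace are assumed to exist already, and every size is illustrative only.

    /* Sketch only: one packed {batch, vector} descriptor per time step. */
    const int seqLen = 4, batch = 2, inVec = 16, hidVec = 32;
    miopenTensorDescriptor_t xDescs[4], yDescs[4];
    for (int t = 0; t < seqLen; ++t) {
        int xDims[2]    = {batch, inVec};
        int xStrides[2] = {inVec, 1};            /* fully packed */
        int yDims[2]    = {batch, hidVec};       /* 2 * hidVec for bidirectional RNNs */
        int yStrides[2] = {hidVec, 1};
        miopenCreateTensorDescriptor(&xDescs[t]);
        miopenSetTensorDescriptor(xDescs[t], miopenFloat, 2, xDims, xStrides);
        miopenCreateTensorDescriptor(&yDescs[t]);
        miopenSetTensorDescriptor(yDescs[t], miopenFloat, 2, yDims, yStrides);
    }
    miopenRNNForwardInference(handle, rnnDesc, seqLen,
                              xDescs, d_x, hxDesc, d_hx, cxDesc, d_cx,
                              wDesc, d_w, yDescs, d_y,
                              hyDesc, d_hy, cyDesc, d_cy,
                              d_workSpace, workSpaceNumBytes);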

◆ miopenRNNForwardTraining()

miopenStatus_t miopenRNNForwardTraining ( miopenHandle_t  handle,
const miopenRNNDescriptor_t  rnnDesc,
const int  sequenceLen,
const miopenTensorDescriptor_t *  xDesc,
const void *  x,
const miopenTensorDescriptor_t  hxDesc,
const void *  hx,
const miopenTensorDescriptor_t  cxDesc,
const void *  cx,
const miopenTensorDescriptor_t  wDesc,
const void *  w,
const miopenTensorDescriptor_t *  yDesc,
void *  y,
const miopenTensorDescriptor_t  hyDesc,
void *  hy,
const miopenTensorDescriptor_t  cyDesc,
void *  cy,
void *  workSpace,
size_t  workSpaceNumBytes,
void *  reserveSpace,
size_t  reserveSpaceNumBytes 
)

Execute forward training for recurrent layer.

Interface for executing the forward training pass on an RNN.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
sequenceLen - Temporal iterations to unroll (input)
xDesc - An array of tensor descriptors. These are the input descriptors to each time step. The first dimension of each descriptor is the batch size, which may decrease from element n to element n+1 but not increase. The second dimension is the same for all descriptors in the array and is the input vector length. (input)
x - Pointer to input tensor (input)
hxDesc - A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hx - Pointer to the hidden layer input tensor. If hx is NULL, then the initial hidden state will be zero initialized. (input)
cxDesc - A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cx - Pointer to the cell layer input tensor. If cx is NULL, then the initial cell state will be zero initialized. (input)
wDesc - A weights tensor descriptor (input)
w - Pointer to input weights tensor (input)
yDesc - An array of fully packed tensor descriptors associated with the output from each time step. The first dimension of the tensor descriptors must equal the first dimension of the first descriptor (batch size) in the xDesc tensor array. The second dimension of each element of the descriptor array depends on the direction mode selected. If the direction mode is unidirectional, the second dimension is the hiddenSize. If the direction mode is bidirectional, the second dimension is twice the hiddenSize. (input)
y - Pointer to output tensor (output)
hyDesc - A hidden tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
hy - Pointer to the hidden layer output tensor. If hy is NULL, then the final hidden state will not be saved. (output)
cyDesc - A cell tensor descriptor whose first dimension is the number of layers if the direction mode is unidirectional, and twice the number of layers if the direction mode is bidirectional. The second dimension of the descriptor must equal the largest first dimension of the xDesc tensor descriptor array. The third dimension equals the hiddenSize. (input)
cy - Pointer to the cell layer output tensor. If cy is NULL, then the final cell state will not be saved. (output)
workSpace - Pointer to memory allocated for forward training (input)
workSpaceNumBytes - Number of allocated bytes in memory for the workspace (input)
reserveSpace - Pointer to memory allocated for hidden states used during training (input / output)
reserveSpaceNumBytes - Number of allocated bytes in memory for use in the forward pass (input)
Returns
miopenStatus_t
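
Before the training-mode forward call, the workspace and reserve buffers must be sized. One way to do this is sketched below using miopenGetRNNWorkspaceSize, miopenGetRNNTrainingReserveSize, and hipMalloc; the surrounding variables (handle, rnnDesc, seqLen, xDescs, the state descriptors, and the data buffers) are assumptions carried over from the previous sketch.

    /* Sketch only: query scratch sizes, allocate, then run forward training. */
    size_t workSpaceNumBytes = 0, reserveSpaceNumBytes = 0;
    miopenGetRNNWorkspaceSize(handle, rnnDesc, seqLen, xDescs, &workSpaceNumBytes);
    miopenGetRNNTrainingReserveSize(handle, rnnDesc, seqLen, xDescs,
                                    &reserveSpaceNumBytes);

    void *d_workSpace = NULL, *d_reserveSpace = NULL;
    hipMalloc(&d_workSpace, workSpaceNumBytes);          /* HIP runtime allocation */
    hipMalloc(&d_reserveSpace, reserveSpaceNumBytes);

    miopenRNNForwardTraining(handle, rnnDesc, seqLen,
                             xDescs, d_x, hxDesc, d_hx, cxDesc, d_cx,
                             wDesc, d_w, yDescs, d_y,
                             hyDesc, d_hy, cyDesc, d_cy,
                             d_workSpace, workSpaceNumBytes,
                             d_reserveSpace, reserveSpaceNumBytes);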

◆ miopenSetRNNDataSeqTensorDescriptor()

miopenStatus_t miopenSetRNNDataSeqTensorDescriptor ( miopenSeqTensorDescriptor_t  seqTensorDesc,
miopenDataType_t  dataType,
miopenRNNBaseLayout_t  layout,
int  maxSequenceLen,
int  batchSize,
int  vectorSize,
const int *  sequenceLenArray,
void *  paddingMarker 
)

Set shape of RNN seqData tensor.

Interface for setting the tensor shape to be used as RNN input data.

Parameters
seqTensorDesc - Tensor descriptor (input/output)
dataType - MIOpen datatype (input)
layout - One of the main supported layouts for RNN data (input)
maxSequenceLen - Sequence length limit within this SeqTensor (input)
batchSize - Number of sequences within this SeqTensor (input)
vectorSize - Vector size (input)
sequenceLenArray - Array containing the length of each sequence in the SeqTensor (input)
paddingMarker - Not used, should be NULL (input)
Returns
miopenStatus_t
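
A minimal sketch of describing a small batch of variable-length float sequences follows; the descriptor-creation call and the concrete sizes are illustrative assumptions, not prescribed values.

    /* Sketch only: 3 sequences of lengths 6, 4, 4 with 16-element vectors. */
    int seqLens[3] = {6, 4, 4};                   /* one length per sequence in the batch */
    miopenSeqTensorDescriptor_t xSeqDesc;
    miopenCreateSeqTensorDescriptor(&xSeqDesc);   /* assumed creator for seq descriptors */
    miopenSetRNNDataSeqTensorDescriptor(xSeqDesc, miopenFloat,
                                        miopenRNNDataSeqMajorNotPadded,
                                        /*maxSequenceLen*/ 6, /*batchSize*/ 3,
                                        /*vectorSize*/ 16, seqLens,
                                        NULL /* paddingMarker: not used */);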

◆ miopenSetRNNDescriptor()

miopenStatus_t miopenSetRNNDescriptor ( miopenRNNDescriptor_t  rnnDesc,
const int  hsize,
const int  nlayers,
miopenRNNInputMode_t  inMode,
miopenRNNDirectionMode_t  direction,
miopenRNNMode_t  rnnMode,
miopenRNNBiasMode_t  biasMode,
miopenRNNAlgo_t  algo,
miopenDataType_t  dataType 
)

Set the details of the RNN descriptor.

Interface for setting the values of the RNN descriptor object. This function requires specific algorithm selection.

Parameters
rnnDesc - RNN layer descriptor type (input)
hsize - Hidden layer size (input)
nlayers - Number of layers (input)
inMode - RNN first layer input mode (input)
direction - RNN direction (input)
rnnMode - RNN model type (input)
biasMode - RNN bias included (input)
algo - RNN algorithm selected (input)
dataType - MIOpen datatype (input)
Returns
miopenStatus_t
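
As a sketch of typical usage, the call below configures a hypothetical 2-layer bidirectional LSTM with 128 hidden units, biases enabled, and the default algorithm; the specific values are illustrative only.

    /* Sketch only: create a descriptor and fill in the RNN configuration. */
    miopenRNNDescriptor_t rnnDesc;
    miopenCreateRNNDescriptor(&rnnDesc);
    miopenSetRNNDescriptor(rnnDesc,
                           /*hsize*/ 128, /*nlayers*/ 2,
                           miopenRNNlinear,        /* full input GEMM on the first layer */
                           miopenRNNbidirection,
                           miopenLSTM,
                           miopenRNNwithBias,
                           miopenRNNdefault,
                           miopenFloat);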

◆ miopenSetRNNDescriptor_V2()

miopenStatus_t miopenSetRNNDescriptor_V2 ( miopenRNNDescriptor_t  rnnDesc,
const int  hsize,
const int  nlayers,
miopenDropoutDescriptor_t  dropoutDesc,
miopenRNNInputMode_t  inMode,
miopenRNNDirectionMode_t  direction,
miopenRNNMode_t  rnnMode,
miopenRNNBiasMode_t  biasMode,
miopenRNNAlgo_t  algo,
miopenDataType_t  dataType 
)

Set the details of the RNN descriptor version 2. This version enables the use of dropout in rnn.

Interface for setting the values of the RNN descriptor object. This function requires specific algorithm selection.

Parameters
rnnDesc - RNN layer descriptor type (input/output)
hsize - Hidden layer size (input)
nlayers - Number of layers (input)
dropoutDesc - Pre-initialized dropout descriptor for dropout layer in between RNN layers (input)
inMode - RNN first layer input mode (input)
direction - RNN direction (input)
rnnMode - RNN model type (input)
biasMode - RNN bias included (input)
algo - RNN algorithm selected (input)
dataType - MIOpen datatype (input)
Returns
miopenStatus_t
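
A sketch of the same configuration with inter-layer dropout is shown below. Here dropoutDesc is only created; it is assumed to be configured separately (dropout probability, RNG state, and so on) through MIOpen's dropout API before the RNN is used.

    /* Sketch only: the dropout descriptor must be fully configured before use. */
    miopenDropoutDescriptor_t dropoutDesc;
    miopenCreateDropoutDescriptor(&dropoutDesc);
    /* ... configure dropout probability, states, etc. via the dropout API here ... */
    miopenSetRNNDescriptor_V2(rnnDesc,
                              /*hsize*/ 128, /*nlayers*/ 2, dropoutDesc,
                              miopenRNNlinear, miopenRNNbidirection,
                              miopenLSTM, miopenRNNwithBias,
                              miopenRNNdefault, miopenFloat);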

◆ miopenSetRNNLayerBias()

miopenStatus_t miopenSetRNNLayerBias ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
miopenTensorDescriptor_t  wDesc,
void *  w,
const int  biasID,
miopenTensorDescriptor_t  biasDesc,
const void *  layerBias 
)

Sets a bias for a specific layer in an RNN stack.

This function sets the bias data for a specific layer and bias ID.

For RNN vanilla miopenRNNRELU and miopenRNNTANH, biasID == 0 sets the bias associated with the input GEMM, while biasID == 1 sets the bias associated with the hidden state GEMM.

For miopenLSTM, biasID 0 to 3 refer to the biases associated with the input GEMM, while 4 to 7 refer to the biases associated with the hidden state GEMM.

  • biasID 0 and 4 are for the input gate.
  • biasID 1 and 5 are for the forget gate.
  • biasID 2 and 6 are for the output gate.
  • biasID 3 and 7 are for the new memory gate.

For miopenGRU biasID 0 to 2 refer to the biases associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • biasID 0 and 3 are for the update gate.
  • biasID 1 and 4 are for the reset gate.
  • biasID 2 and 5 are for the new memory gate.

For bi-directional RNNs, the backward-in-time direction is numbered as the layer directly after the forward-in-time direction.

The input argument biasDesc is a tensor descriptor that has typically been populated beforehand by calling miopenGetRNNLayerBias().

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenSetRNNLayerBias will return the error status miopenStatusBadParm for a biasID associated with the input GEMM.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
wDesc - A tensor descriptor to the bias tensor (input)
w - Pointer to memory containing bias tensor (input)
biasID - ID of the internal bias tensor (input)
biasDesc - Descriptor of the bias tensor (input)
layerBias - Pointer to the memory location of the bias tensor (input)
Returns
miopenStatus_t
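
The sketch below uploads one LSTM bias: the forget-gate bias of the input GEMM (biasID 1) in the first layer. biasDesc is assumed to have been populated beforehand (for example with miopenGetRNNLayerBias()), d_bias to hold the hiddenSize new bias values, and handle, rnnDesc, xDesc, wDesc, and d_w to come from the surrounding setup; all of these names are illustrative.

    /* Sketch only: overwrite a single bias vector inside the packed weight buffer. */
    const int layer  = 0;   /* first layer, forward-in-time direction */
    const int biasID = 1;   /* LSTM: 0..3 input GEMM, 4..7 hidden GEMM; 1 = forget gate */
    miopenSetRNNLayerBias(handle, rnnDesc, layer,
                          xDesc,              /* input tensor descriptor          */
                          wDesc, d_w,         /* packed weight/bias buffer        */
                          biasID,
                          biasDesc,           /* shape of the bias being written  */
                          d_bias);            /* new bias values                  */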

◆ miopenSetRNNLayerParam()

miopenStatus_t miopenSetRNNLayerParam ( miopenHandle_t  handle,
miopenRNNDescriptor_t  rnnDesc,
const int  layer,
miopenTensorDescriptor_t  xDesc,
miopenTensorDescriptor_t  wDesc,
void *  w,
const int  paramID,
miopenTensorDescriptor_t  paramDesc,
const void *  layerParam 
)

Sets a weight matrix for a specific layer in an RNN stack.

This function sets the weight matrix data for a specific layer and parameter ID.

For RNN vanilla miopenRNNRELU and miopenRNNTANH, paramID == 0 sets the weight matrix associated with the input GEMM, while paramID == 1 sets the weight matrix associated with the hidden state GEMM.

For miopenLSTM, paramID 0 to 3 refer to the weight matrices associated with the input GEMM, while 4 to 7 refer to the weight matrices associated with the hidden state GEMM.

  • paramID 0 and 4 are for the input gate.
  • paramID 1 and 5 are for the forget gate.
  • paramID 2 and 6 are for the output gate.
  • paramID 3 and 7 are for the new memory gate.

For miopenGRU, paramID 0 to 2 refer to the weight matrices associated with the input GEMM, while 3 through 5 are associated with the hidden state GEMM.

  • paramID 0 and 3 are for the update gate.
  • paramID 1 and 4 are for the reset gate.
  • paramID 2 and 5 are for the new memory gate.

For bi-directional RNNs, the backward-in-time direction is numbered as the layer directly after the forward-in-time direction.

The input argument paramDesc is a tensor descriptor that has typically been populated beforehand by calling miopenGetRNNLayerParam().

Note: When inputSkip mode is selected there is no input layer matrix operation, and therefore no associated memory. In this case miopenSetRNNLayerParam() will return the error status miopenStatusBadParm for a paramID associated with the input GEMM.

Parameters
handle - MIOpen handle (input)
rnnDesc - RNN layer descriptor type (input)
layer - The layer number in the RNN stack (input)
xDesc - A tensor descriptor to input (input)
wDesc - A tensor descriptor to the parameter tensor (input)
w - Pointer to memory containing parameter tensor (input)
paramID - ID of the internal parameter tensor (input)
paramDesc - Descriptor of the parameter tensor (input)
layerParam - Pointer to the memory location of the parameter tensor (input)
Returns
miopenStatus_t
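
Analogously, the sketch below overwrites one GRU weight matrix: the update-gate matrix of the input GEMM (paramID 0) in the first layer. paramDesc is assumed to describe the matrix shape (for example as filled in by miopenGetRNNLayerParam()) and d_param to hold the replacement values; the variable names are illustrative.

    /* Sketch only: overwrite a single weight matrix inside the packed weight buffer. */
    const int layer   = 0;
    const int paramID = 0;  /* GRU: 0..2 input GEMM, 3..5 hidden GEMM; 0 = update gate */
    miopenSetRNNLayerParam(handle, rnnDesc, layer,
                           xDesc, wDesc, d_w,
                           paramID,
                           paramDesc,          /* shape of the matrix being written */
                           d_param);           /* new weight values                 */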

◆ miopenSetRNNPaddingMode()

miopenStatus_t miopenSetRNNPaddingMode ( miopenRNNDescriptor_t  rnnDesc,
miopenRNNPaddingMode_t  paddingMode 
)

Sets the padding mode for an RNN descriptor.

This function changes the padding mode of a previously created and initialized RNN descriptor. It must be called before the miopenGetRNNWorkspaceSize() and miopenGetRNNTrainingReserveSize() functions are used. By default, non-padded data is expected at the RNN input/output.

Parameters
rnnDesc - RNN layer descriptor type (input/output)
paddingMode - RNN input/output data padding mode (input)
Returns
miopenStatus_t
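
A short sketch of the intended ordering follows: the padding mode is changed first, and only then are the workspace and reserve sizes queried, so that the returned sizes reflect the padded layout. The variables reused here (handle, rnnDesc, seqLen, xDescs, and the size outputs) are assumptions carried over from the earlier sketches.

    /* Sketch only: switch to padded I/O before sizing the scratch buffers. */
    miopenSetRNNPaddingMode(rnnDesc, miopenRNNIOWithPadding);
    miopenGetRNNWorkspaceSize(handle, rnnDesc, seqLen, xDescs, &workSpaceNumBytes);
    miopenGetRNNTrainingReserveSize(handle, rnnDesc, seqLen, xDescs,
                                    &reserveSpaceNumBytes);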