Operators


operation#

struct operation#

The operation interface represents an action an instruction will perform. All operation classes must be CopyConstructible.

bool migraphx::internal::is_context_free(const operation &x)#

Returns true if the operation does not require a context to run compute.

bool migraphx::internal::has_finalize(const operation &x)#

Returns true if the operation has a finalize method.
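
A minimal usage sketch for these queries, assuming the public make_op factory and the header paths shown below (this reference lists the free functions under migraphx::internal; in the installed headers they are reachable as migraphx::is_context_free and migraphx::has_finalize, so treat the exact paths and namespaces as assumptions):

    #include <iostream>
    #include <migraphx/make_op.hpp>
    #include <migraphx/operation.hpp>

    int main()
    {
        // Construct a type-erased operation by name and query the traits above.
        migraphx::operation op = migraphx::make_op("add");
        std::cout << op.name() << " is context-free: " << migraphx::is_context_free(op) << "\n";
        std::cout << op.name() << " has finalize: " << migraphx::has_finalize(op) << "\n";
    }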

operators#

namespace op#

Enums

enum padding_mode_t#

Values:

enumerator default_#
enumerator same_lower#
enumerator same_upper#
enum class pooling_mode#

Values:

enumerator average#
enumerator max#
enumerator lpnorm#
enum class rnn_direction#

Values:

enumerator forward#
enumerator reverse#
enumerator bidirectional#
enum class normalize_attribute#

normalize_attribute settings. Note that default options are not included as enums; a small normalization sketch follows the enumerator values below.

  1. use_input (default) vs. use_output: Affects the rank of the attribute. use_input -> lens.size(), use_output -> lens.size() + vec.size().

  2. use_rank (default) vs. use_len: use_rank sets the max value/index of the attribute to the rank of lens. use_len sets the max value/index to the corresponding value in lens at the axes index.

  3. clip_min vs. not_clip_min (default): Clip values less than the minimum to the minimum or not.

  4. include_min vs. exclude_min (default): Include or exclude the minimum value/index for range checking and clipping.

  5. clip_max vs. not_clip_max (default): Clip values greater than the maximum or not.

  6. include_max vs. exclude_max (default): Include or exclude the maximum value/index for range checking and clipping.

  7. normalize_padding: To normalize the padding to 2*(pad ndim) dimensions.

Values:

enumerator use_output#
enumerator use_len#
enumerator clip_max#
enumerator clip_min#
enumerator include_max#
enumerator include_min#
enumerator normalize_padding#
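
As a rough illustration, with the defaults (use_input, use_rank, no clipping) an axis-like attribute is checked against the rank of the input and negative values wrap around; this is a standalone sketch with a hypothetical normalize_axis helper, not the library's internal normalization code:

    #include <cstdint>
    #include <stdexcept>

    // Hypothetical helper: with the use_input/use_rank defaults, a valid axis
    // lies in [-rank, rank) and negative values wrap to axis + rank.
    std::int64_t normalize_axis(std::int64_t axis, std::int64_t rank)
    {
        if(axis < -rank || axis >= rank)
            throw std::runtime_error("axis out of range");
        return axis < 0 ? axis + rank : axis;
    }
    // e.g. normalize_axis(-1, 4) == 3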

Functions

MIGRAPHX_EXPORT std::ostream & operator<< (std::ostream &os, pooling_mode v)
MIGRAPHX_EXPORT std::ostream & operator<< (std::ostream &os, rnn_direction v)
struct abs : public migraphx::internal::op::unary<abs>#
#include <migraphx/op/abs.hpp>
struct acos : public migraphx::internal::op::unary<acos>#
#include <migraphx/op/acos.hpp>
struct acosh : public migraphx::internal::op::unary<acosh>#
#include <migraphx/op/acosh.hpp>
struct add : public migraphx::internal::op::binary<add>#
#include <migraphx/op/add.hpp>
struct allocate#
#include <migraphx/op/allocate.hpp>

Static allocate: no inputs: allocate(). The this.s attribute is set to the static output shape of the buffer.

Dynamic allocate: one input: allocate(output_dims). output_dims contains the output buffer dimensions and has a static shape. Either this.s or this.buf_type must be set to calculate the dynamic output shape at compute time. If this.buf_type is set, the compute_shape() of allocate at compile time will have dynamic_dimensions from {0, max_int} with rank = output_dims.ndim(). If this.s is set, then compute_shape() will output this.s; this.s should be a dynamic shape.
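
A standalone sketch of the buf_type rule above (dyn_dim here is a hypothetical stand-in, not the library's shape API): at compile time every output dimension is unconstrained, i.e. spans {0, max_int}.

    #include <cstddef>
    #include <limits>
    #include <vector>

    // Hypothetical stand-in for a dynamic dimension {min, max}.
    struct dyn_dim { std::size_t min, max; };

    // When only buf_type is set, each output dimension is unknown at compile
    // time, so every dynamic dimension spans {0, max_int}.
    std::vector<dyn_dim> unconstrained_dims(std::size_t rank)
    {
        return std::vector<dyn_dim>(rank, {0, std::numeric_limits<std::size_t>::max()});
    }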

struct argmax#
#include <migraphx/op/argmax.hpp>
struct argmin#
#include <migraphx/op/argmin.hpp>
struct as_shape#
#include <migraphx/op/as_shape.hpp>
struct asin : public migraphx::internal::op::unary<asin>#
#include <migraphx/op/asin.hpp>
struct asinh : public migraphx::internal::op::unary<asinh>#
#include <migraphx/op/asinh.hpp>
struct atan : public migraphx::internal::op::unary<atan>#
#include <migraphx/op/atan.hpp>
struct atanh : public migraphx::internal::op::unary<atanh>#
#include <migraphx/op/atanh.hpp>
template<class Derived>
struct binary : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/binary.hpp>
struct broadcast#
#include <migraphx/op/broadcast.hpp>

1 input version: Broadcasts a tensor from the original shape to broadcast_lens by setting the stride of broadcast dimensions to zero. For a 1D input shape, the axis attribute is the output dimension that stays the same. Example: broadcasting shape [1024] -> [4, 1024, 3] has axis = 1.

For higher-rank input shapes, axis is an offset parameter for the broadcasting, so this operator works in the opposite direction of NumPy broadcasting (dimensions are matched element-wise from left to right). Example: broadcasting shape [2, 2] -> [2, 2, 3] uses axis = 0.

2 input version: Broadcasts the first input, a 1D shape, into the second input shape based on the axis parameter. Handles broadcasting a 1D static shape into a higher-rank dynamic shape. broadcast_lens is not used.
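
A standalone sketch of the one-input rule above (plain C++, not the MIGraphX shape API): the input stride is kept at the axis position and every broadcast dimension gets stride zero, using the [1024] -> [4, 1024, 3] example from the text.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Broadcast a [1024] tensor to [4, 1024, 3] with axis = 1.
        std::vector<std::size_t> in_lens{1024};
        std::vector<std::size_t> in_strides{1};
        std::vector<std::size_t> out_lens{4, 1024, 3};
        std::size_t axis = 1;

        // Broadcast dimensions get stride 0; the original dimension keeps its stride.
        std::vector<std::size_t> out_strides(out_lens.size(), 0);
        for(std::size_t i = 0; i < in_lens.size(); ++i)
            out_strides[axis + i] = in_strides[i];

        for(auto s : out_strides)
            std::cout << s << " "; // prints: 0 1 0
        std::cout << "\n";
    }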

struct capture#
#include <migraphx/op/capture.hpp>
struct ceil : public migraphx::internal::op::unary<ceil>#
#include <migraphx/op/ceil.hpp>
struct clip#
#include <migraphx/op/clip.hpp>
struct concat#
#include <migraphx/op/concat.hpp>
struct contiguous#
#include <migraphx/op/contiguous.hpp>

The contiguous operator takes a non-standard input tensor and returns the same tensor in standard form. For example, if input tensor A with lens = (4,5) is first transposed, i.e. lens = (5,4), the tensor's data layout stays the same during the transpose operation; only its shape lengths and strides are changed. This leaves the tensor in a non-standard form. The contiguous operator copies the underlying data so that the resulting tensor is returned to standard form.
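
A standalone sketch of what contiguous does for the transposed example above (plain C++, not the operator's implementation): the non-standard view is read through its strides and copied into a freshly allocated standard (row-major) buffer.

    #include <cstddef>
    #include <vector>

    int main()
    {
        // A 4x5 tensor stored in standard (row-major) order.
        std::vector<float> a(4 * 5);
        for(std::size_t i = 0; i < a.size(); ++i)
            a[i] = static_cast<float>(i);

        // Transposing to lens = (5,4) only swaps lens and strides; the data is
        // untouched, so strides become (1, 5) instead of the standard (4, 1).
        std::size_t lens[2]    = {5, 4};
        std::size_t strides[2] = {1, 5};

        // contiguous copies the elements so the result is standard again.
        std::vector<float> b(5 * 4);
        for(std::size_t i = 0; i < lens[0]; ++i)
            for(std::size_t j = 0; j < lens[1]; ++j)
                b[i * lens[1] + j] = a[i * strides[0] + j * strides[1]];
    }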

struct convert : public migraphx::internal::op::unary<convert>#
#include <migraphx/op/convert.hpp>
struct convolution#
#include <migraphx/op/convolution.hpp>

Convolution operator. Does not support optimal dimensions for spatial dimensions. Returns empty optimals.

struct convolution_backwards#
#include <migraphx/op/convolution_backwards.hpp>
struct cos : public migraphx::internal::op::unary<cos>#
#include <migraphx/op/cos.hpp>
struct cosh : public migraphx::internal::op::unary<cosh>#
#include <migraphx/op/cosh.hpp>
struct dequantizelinear#
#include <migraphx/op/dequantizelinear.hpp>
struct dimensions_of#
#include <migraphx/op/dimensions_of.hpp>

Returns the dimensions of the input argument from the starting axis to the ending axis. At minimum, end must be set to use this operator (set end to ndim for the default ONNX Shape operator behavior). This should only be used for dynamic shapes, since it can be simplified to a literal for static shapes.

struct div : public migraphx::internal::op::binary<div>#
#include <migraphx/op/div.hpp>
struct dot#
#include <migraphx/op/dot.hpp>
struct elu : public migraphx::internal::op::unary<elu>#
#include <migraphx/op/elu.hpp>
struct equal : public migraphx::internal::op::binary<equal>#
#include <migraphx/op/equal.hpp>
struct erf : public migraphx::internal::op::unary<erf>#
#include <migraphx/op/erf.hpp>
struct exp : public migraphx::internal::op::unary<exp>#
#include <migraphx/op/exp.hpp>
struct fill#
#include <migraphx/op/fill.hpp>

fill(default_value, output_buffer): fills an output buffer with the given default_value. Note that if default_value is a literal and output_buffer has a static shape, this operator can be replaced with a literal.

struct flatten#
#include <migraphx/op/flatten.hpp>
struct floor : public migraphx::internal::op::unary<floor>#
#include <migraphx/op/floor.hpp>
struct fmod : public migraphx::internal::op::binary<fmod>#
#include <migraphx/op/fmod.hpp>
struct gather#
#include <migraphx/op/gather.hpp>
struct gathernd#
#include <migraphx/op/gathernd.hpp>
struct get_tuple_elem#
#include <migraphx/op/get_tuple_elem.hpp>
struct greater : public migraphx::internal::op::binary<greater>#
#include <migraphx/op/greater.hpp>
struct gru#
#include <migraphx/op/gru.hpp>
struct highest#
#include <migraphx/op/reduce_op.hpp>
struct identity#
#include <migraphx/op/identity.hpp>
struct if_op#
#include <migraphx/op/if_op.hpp>
struct im2col#
#include <migraphx/op/im2col.hpp>
struct isnan : public migraphx::internal::op::unary<isnan>#
#include <migraphx/op/isnan.hpp>
struct layout : public migraphx::internal::op::unary<layout>#
#include <migraphx/op/layout.hpp>
struct leaky_relu : public migraphx::internal::op::unary<leaky_relu>#
#include <migraphx/op/leaky_relu.hpp>
struct less : public migraphx::internal::op::binary<less>#
#include <migraphx/op/less.hpp>
struct load#
#include <migraphx/op/load.hpp>
struct log : public migraphx::internal::op::unary<log>#
#include <migraphx/op/log.hpp>
struct logical_and : public migraphx::internal::op::binary<logical_and>#
#include <migraphx/op/logical_and.hpp>
struct logical_or : public migraphx::internal::op::binary<logical_or>#
#include <migraphx/op/logical_or.hpp>
struct logical_xor : public migraphx::internal::op::binary<logical_xor>#
#include <migraphx/op/logical_xor.hpp>
struct logsoftmax#
#include <migraphx/op/logsoftmax.hpp>
struct loop#
#include <migraphx/op/loop.hpp>
struct lowest#
#include <migraphx/op/reduce_op.hpp>
struct lrn#
#include <migraphx/op/lrn.hpp>
struct lstm#
#include <migraphx/op/lstm.hpp>
struct max : public migraphx::internal::op::binary<max>#
#include <migraphx/op/max.hpp>
struct min : public migraphx::internal::op::binary<min>#
#include <migraphx/op/min.hpp>
struct mod : public migraphx::internal::op::binary<mod>#
#include <migraphx/op/mod.hpp>
struct mul : public migraphx::internal::op::binary<mul>#
#include <migraphx/op/mul.hpp>
struct multibroadcast#
#include <migraphx/op/multibroadcast.hpp>

Broadcasts multiple dimensions between tensors. There are two versions of this operator: 1 input and 2+ inputs. The 1 input version uses the output_lens attribute and broadcasts to it. The 2+ input version broadcasts the first input to the common shape at evaluation time.
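
A standalone sketch of one way to compute a common broadcast shape for the 2+ input version (standard NumPy-style right-aligned rules; treat the exact alignment the operator uses as an assumption here): shapes are aligned from the right, and matching dimensions must be equal or one of them must be 1.

    #include <algorithm>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    std::vector<std::size_t> common_lens(std::vector<std::size_t> a, std::vector<std::size_t> b)
    {
        if(a.size() < b.size())
            std::swap(a, b);
        std::vector<std::size_t> out = a;
        std::size_t offset = a.size() - b.size();
        for(std::size_t i = 0; i < b.size(); ++i)
        {
            std::size_t x = a[offset + i];
            std::size_t y = b[i];
            if(x != y && x != 1 && y != 1)
                throw std::runtime_error("incompatible dimensions");
            out[offset + i] = std::max(x, y);
        }
        return out;
    }
    // e.g. common_lens({3, 1, 5}, {4, 5}) == {3, 4, 5}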

struct multinomial#
#include <migraphx/op/multinomial.hpp>
struct neg : public migraphx::internal::op::unary<neg>#
#include <migraphx/op/neg.hpp>
struct nonmaxsuppression#
#include <migraphx/op/nonmaxsuppression.hpp>
struct nonzero#
#include <migraphx/op/nonzero.hpp>
struct one#
#include <migraphx/op/reduce_op.hpp>
template<class Derived>
struct op_name#
#include <migraphx/op/name.hpp>

Create name from class.

Subclassed by migraphx::internal::op::binary< add >, migraphx::internal::op::binary< div >, migraphx::internal::op::binary< equal >, migraphx::internal::op::binary< fmod >, migraphx::internal::op::binary< greater >, migraphx::internal::op::binary< less >, migraphx::internal::op::binary< logical_and >, migraphx::internal::op::binary< logical_or >, migraphx::internal::op::binary< logical_xor >, migraphx::internal::op::binary< max >, migraphx::internal::op::binary< min >, migraphx::internal::op::binary< mod >, migraphx::internal::op::binary< mul >, migraphx::internal::op::binary< pow >, migraphx::internal::op::binary< prelu >, migraphx::internal::op::binary< sqdiff >, migraphx::internal::op::binary< sub >, migraphx::internal::op::prefix_scan_op< prefix_scan_sum >, migraphx::internal::op::reduce_op< reduce_max >, migraphx::internal::op::reduce_op< reduce_mean >, migraphx::internal::op::reduce_op< reduce_min >, migraphx::internal::op::reduce_op< reduce_prod >, migraphx::internal::op::reduce_op< reduce_sum >, migraphx::internal::op::scatter< scatter_add >, migraphx::internal::op::scatter< scatter_mul >, migraphx::internal::op::scatter< scatter_none >, migraphx::internal::op::scatternd_op< scatternd_add >, migraphx::internal::op::scatternd_op< scatternd_mul >, migraphx::internal::op::scatternd_op< scatternd_none >, migraphx::internal::op::unary< abs >, migraphx::internal::op::unary< acos >, migraphx::internal::op::unary< acosh >, migraphx::internal::op::unary< asin >, migraphx::internal::op::unary< asinh >, migraphx::internal::op::unary< atan >, migraphx::internal::op::unary< atanh >, migraphx::internal::op::unary< ceil >, migraphx::internal::op::unary< convert >, migraphx::internal::op::unary< cos >, migraphx::internal::op::unary< cosh >, migraphx::internal::op::unary< elu >, migraphx::internal::op::unary< erf >, migraphx::internal::op::unary< exp >, migraphx::internal::op::unary< floor >, migraphx::internal::op::unary< isnan >, migraphx::internal::op::unary< layout >, migraphx::internal::op::unary< leaky_relu >, migraphx::internal::op::unary< log >, migraphx::internal::op::unary< neg >, migraphx::internal::op::unary< recip >, migraphx::internal::op::unary< relu >, migraphx::internal::op::unary< round >, migraphx::internal::op::unary< rsqrt >, migraphx::internal::op::unary< sigmoid >, migraphx::internal::op::unary< sign >, migraphx::internal::op::unary< sin >, migraphx::internal::op::unary< sinh >, migraphx::internal::op::unary< sqrt >, migraphx::internal::op::unary< tan >, migraphx::internal::op::unary< tanh >, migraphx::internal::op::unary< unary_not >, migraphx::internal::op::binary< Derived >, migraphx::internal::op::prefix_scan_op< Derived >, migraphx::internal::op::reduce_op< Derived >, migraphx::internal::op::scatter< Derived >, migraphx::internal::op::scatternd_op< Derived >, migraphx::internal::op::unary< Derived >

struct outline#
#include <migraphx/op/outline.hpp>
struct pad#
#include <migraphx/op/pad.hpp>
struct pointwise#
#include <migraphx/op/pointwise.hpp>
struct pooling#
#include <migraphx/op/pooling.hpp>
struct pow : public migraphx::internal::op::binary<pow>#
#include <migraphx/op/pow.hpp>
template<class Derived>
struct prefix_scan_op : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/prefix_scan_op.hpp>

Parent struct for prefix scan operations. A prefix scan is equivalent to the C++ std::exclusive_scan or std::inclusive_scan. Given a list of numbers, a prefix scan sum op returns an equal-sized list of the running totals of the values. Operations other than addition can be supported by their own child ops.
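
Since the text defines a prefix scan in terms of the standard library, a tiny self-contained example with std::inclusive_scan shows the running totals a prefix scan sum produces:

    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<int> x{1, 2, 3, 4};
        std::vector<int> totals(x.size());
        // Running totals: each output element is the sum of all inputs up to it.
        std::inclusive_scan(x.begin(), x.end(), totals.begin());
        for(int v : totals)
            std::cout << v << " "; // prints: 1 3 6 10
        std::cout << "\n";
    }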

struct prefix_scan_sum : public migraphx::internal::op::prefix_scan_op<prefix_scan_sum>#
#include <migraphx/op/prefix_scan_sum.hpp>
struct prelu : public migraphx::internal::op::binary<prelu>#
#include <migraphx/op/prelu.hpp>
struct quant_convolution#
#include <migraphx/op/quant_convolution.hpp>
struct quant_dot#
#include <migraphx/op/quant_dot.hpp>
struct quantizelinear#
#include <migraphx/op/quantizelinear.hpp>
struct random_seed#
#include <migraphx/op/random_seed.hpp>

Generates a random seed for use by random number generators. Generating the seed at runtime guarantees a different random sequence on every execution. This operation has no inputs or attributes, and outputs an unsigned integer tensor with a single value.

struct random_uniform#
#include <migraphx/op/random_uniform.hpp>

random_uniform populates the passed shape with uniformly distributed random numbers. The range for floating-point data types is (0, 1); for integer types it is [0, <max value for the type>].

struct recip : public migraphx::internal::op::unary<recip>#
#include <migraphx/op/recip.hpp>
struct reduce_max : public migraphx::internal::op::reduce_op<reduce_max>#
#include <migraphx/op/reduce_max.hpp>
struct reduce_mean : public migraphx::internal::op::reduce_op<reduce_mean>#
#include <migraphx/op/reduce_mean.hpp>
struct reduce_min : public migraphx::internal::op::reduce_op<reduce_min>#
#include <migraphx/op/reduce_min.hpp>
template<class Derived>
struct reduce_op : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/reduce_op.hpp>
struct reduce_prod : public migraphx::internal::op::reduce_op<reduce_prod>#
#include <migraphx/op/reduce_prod.hpp>
struct reduce_sum : public migraphx::internal::op::reduce_op<reduce_sum>#
#include <migraphx/op/reduce_sum.hpp>
struct relu : public migraphx::internal::op::unary<relu>#
#include <migraphx/op/relu.hpp>
struct reshape#
#include <migraphx/op/reshape.hpp>

1 input version: reshape(input_data) with this.dims = output_dims. Makes a copy of input_data with the output shape.

2 input version: reshape(input_data, output_buffer) with this.dims unset. Copies input_data into output_buffer; output_buffer already has the output shape. This version will not fail gracefully if the input shape and the output_buffer shape are incompatible; a throw catches the case where the number of elements does not match at runtime. This version should only be used for dynamic reshapes (output dimensions known only at runtime). If output_buffer has a static shape during compile/parse, you can use the 1 input version.
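
A standalone sketch of the element-count check mentioned above (not the operator's actual implementation): a reshape is only valid when the input and the target shape hold the same number of elements.

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <stdexcept>
    #include <vector>

    void check_reshape(const std::vector<std::size_t>& in_lens,
                       const std::vector<std::size_t>& out_lens)
    {
        auto elements = [](const std::vector<std::size_t>& lens) {
            return std::accumulate(lens.begin(), lens.end(), std::size_t{1},
                                   std::multiplies<>{});
        };
        if(elements(in_lens) != elements(out_lens))
            throw std::runtime_error("reshape: number of elements does not match");
    }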

struct reshape_lazy#
#include <migraphx/op/reshape_lazy.hpp>
struct reverse#
#include <migraphx/op/reverse.hpp>
struct rnn#
#include <migraphx/op/rnn.hpp>
struct rnn_last_cell_output#
#include <migraphx/op/rnn_last_cell_output.hpp>
struct rnn_last_hs_output#
#include <migraphx/op/rnn_last_hs_output.hpp>
struct rnn_var_sl_last_output#
#include <migraphx/op/rnn_var_sl_last_output.hpp>
struct rnn_var_sl_shift_output#
#include <migraphx/op/rnn_variable_seq_lens.hpp>
struct rnn_var_sl_shift_sequence#
#include <migraphx/op/rnn_variable_seq_lens.hpp>
struct roialign#
#include <migraphx/op/roialign.hpp>
struct round : public migraphx::internal::op::unary<round>#
#include <migraphx/op/round.hpp>
struct rsqrt : public migraphx::internal::op::unary<rsqrt>#
#include <migraphx/op/rsqrt.hpp>
struct run_on_target#
#include <migraphx/op/run_on_target.hpp>
struct scalar#
#include <migraphx/op/scalar.hpp>
template<class Derived>
struct scatter : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/scatter.hpp>
struct scatter_add : public migraphx::internal::op::scatter<scatter_add>#
#include <migraphx/op/scatter_add.hpp>
struct scatter_mul : public migraphx::internal::op::scatter<scatter_mul>#
#include <migraphx/op/scatter_mul.hpp>
struct scatter_none : public migraphx::internal::op::scatter<scatter_none>#
#include <migraphx/op/scatter_none.hpp>
struct scatternd_add : public migraphx::internal::op::scatternd_op<scatternd_add>#
#include <migraphx/op/scatternd_add.hpp>
struct scatternd_mul : public migraphx::internal::op::scatternd_op<scatternd_mul>#
#include <migraphx/op/scatternd_mul.hpp>
struct scatternd_none : public migraphx::internal::op::scatternd_op<scatternd_none>#
#include <migraphx/op/scatternd_none.hpp>
template<class Derived>
struct scatternd_op : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/scatternd_op.hpp>

N-dimensional scatter operations. This struct is the parent class of ops that differ in the formula used to reduce (combine the old and new values of) the scattered value. It was originally based on the ONNX ScatterND operation (see onnx/onnx) and is also similar to NumPy's numpy.add.at().

Template Parameters:

Derived – a template parameter in the CRTP inheritance idiom, represents one of the child operations.
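
As a rough analogue of the reduction these child ops apply (a minimal 1-D sketch in the spirit of numpy.add.at(); the real operator addresses the output through N-dimensional index tuples):

    #include <cstddef>
    #include <vector>

    // Each update is combined with the existing output value at its index;
    // scatternd_add uses addition as the reduction, scatternd_mul multiplication,
    // and scatternd_none simply overwrites.
    void scatter_add_1d(std::vector<float>& output,
                        const std::vector<std::size_t>& indices,
                        const std::vector<float>& updates)
    {
        for(std::size_t i = 0; i < indices.size(); ++i)
            output[indices[i]] += updates[i];
    }
    // e.g. output = {0, 0, 0, 0}, indices = {1, 1, 3}, updates = {2, 5, 7}
    // leaves output == {0, 7, 0, 7}, like numpy.add.at(output, indices, updates).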

struct select_module#
#include <migraphx/op/select_module.hpp>
struct sigmoid : public migraphx::internal::op::unary<sigmoid>#
#include <migraphx/op/sigmoid.hpp>
struct sign : public migraphx::internal::op::unary<sign>#
#include <migraphx/op/sign.hpp>
struct sin : public migraphx::internal::op::unary<sin>#
#include <migraphx/op/sin.hpp>
struct sinh : public migraphx::internal::op::unary<sinh>#
#include <migraphx/op/sinh.hpp>
struct slice#
#include <migraphx/op/slice.hpp>

Slice operator that accepts variable axes, starts and ends.

Attributes:
axes: constant axes to slice over (optional)
starts: constant slice starting indices (optional)
ends: constant slice ending indices (optional)

Parameters:
data: the input tensor to slice (dynamic or static shape)
input_starts: starting indices of the slice (optional, static shape)
input_ends: ending indices of the slice (optional, static shape)
input_axes: axes to slice over (optional, static shape)

struct softmax#
#include <migraphx/op/softmax.hpp>
struct sqdiff : public migraphx::internal::op::binary<sqdiff>#
#include <migraphx/op/sqdiff.hpp>
struct sqrt : public migraphx::internal::op::unary<sqrt>#
#include <migraphx/op/sqrt.hpp>
struct squeeze#
#include <migraphx/op/squeeze.hpp>
struct step#
#include <migraphx/op/step.hpp>
struct sub : public migraphx::internal::op::binary<sub>#
#include <migraphx/op/sub.hpp>
struct tan : public migraphx::internal::op::unary<tan>#
#include <migraphx/op/tan.hpp>
struct tanh : public migraphx::internal::op::unary<tanh>#
#include <migraphx/op/tanh.hpp>
struct topk#
#include <migraphx/op/topk.hpp>
struct transpose#
#include <migraphx/op/transpose.hpp>
template<class Derived>
struct unary : public migraphx::internal::op::op_name<Derived>#
#include <migraphx/op/unary.hpp>
struct unary_not : public migraphx::internal::op::unary<unary_not>#
#include <migraphx/op/unary_not.hpp>
struct undefined#
#include <migraphx/op/undefined.hpp>
struct unknown#
#include <migraphx/op/unknown.hpp>
struct unsqueeze#
#include <migraphx/op/unsqueeze.hpp>

Adds dimensions to a tensor based on the axes attribute. The axes values are relative to the number of output shape dimensions and should not contain duplicates. steps are for modifying dimensions added to the middle of the original shape; each step must be a factor of the original dimension. Example: unsqueeze(shape = [3, 4, 10], axes = [2, 4, 5], steps = [2]) -> shape = [3, 4, 2, 5, 1, 1]. The dynamic shape version does not handle steps.

struct where#
#include <migraphx/op/where.hpp>
struct zero#
#include <migraphx/op/reduce_op.hpp>