diff --git a/index.bs b/index.bs index ef199b7a..ba09114b 100644 --- a/index.bs +++ b/index.bs @@ -870,6 +870,8 @@ dictionary MLComputeResult { interface MLContext { Promise compute( MLGraph graph, MLNamedArrayBufferViews inputs, MLNamedArrayBufferViews outputs); + + MLOpSupportLimits opSupportLimits(); }; @@ -1043,6 +1045,75 @@ Note: Invocations of {{MLContext/compute()}} will fail if any of the {{MLContext +### {{MLContext/opSupportLimits()}} ### {#api-mlcontext-opsupportlimits} +The {{MLContext/opSupportLimits()}} method exposes the level of support that differs across implementations at the operator level. Consumers of the WebNN API are encouraged to probe the feature support level by using {{MLContext/opSupportLimits()}} to determine the optimal model architecture to deploy for each target platform. + +#### {{MLOpSupportLimits}} dictionary #### {#api-mlcontext-opsupportlimits-dictionary} +The {{MLOpSupportLimits}} dictionary has the following top-level members; aside from these, each [=operator=] has a corresponding member defined in its builder method. + +
+ : preferredInputLayout + :: Preferred input layout for layout dependent operators like {{MLGraphBuilder/conv2d()}}. + : input + :: Support limits for input {{MLOperand}}s for an {{MLGraph}}. + : constant + :: Support limits for constant {{MLOperand}}s for an {{MLGraph}}. + : output + :: Support limits for output {{MLOperand}}s for an {{MLGraph}}. +
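+<div class="example">
+The following non-normative sketch probes {{MLOpSupportLimits}} before choosing a model variant; the layout and data type values it tests for are illustrative, and the dataTypes member it reads is the {{MLSupportLimits}} member defined below.
+<pre highlight="js">
+const context = await navigator.ml.createContext();
+const limits = context.opSupportLimits();
+
+// Layout-dependent operators such as conv2d() work best with the layout
+// the backend reports as preferred.
+console.log(`preferred input layout: ${limits.preferredInputLayout}`);
+
+// Decide whether a float16 variant of the model can be deployed by
+// checking the data types accepted for graph inputs.
+if (limits.input.dataTypes.includes('float16')) {
+  console.log('float16 graph inputs are supported');
+}
+</pre>
+</div>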
+#### {{MLSupportLimits}} dictionary #### {#api-mlcontext-supportlimits-dictionary} + +
+ : dataTypes + :: Supported data types. +
+ +#### {{MLBinarySupportLimits}} dictionary #### {#api-mlcontext-binarysupportlimits-dictionary} + + +
+ : a + :: {{MLSupportLimits}} for a operand. + : b + :: {{MLSupportLimits}} for b operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +#### {{MLSingleInputSupportLimits}} dictionary #### {#api-mlcontext-singleinputsupportlimits-dictionary} + + +
+ : input + :: {{MLSupportLimits}} for input operand. + : output + :: {{MLSupportLimits}} for output operand. +
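+<div class="example">
+As a non-normative illustration, the helpers below read the generic {{MLBinarySupportLimits}} and {{MLSingleInputSupportLimits}} shapes in the same way for every operator that uses them; the operator names and the data type passed in are only examples, and the context is assumed to have been created earlier.
+<pre highlight="js">
+const limits = context.opSupportLimits();
+
+// MLBinarySupportLimits exposes per-operand limits named a, b and output.
+function binaryOpSupports(name, dataType) {
+  const op = limits[name];
+  return op.a.dataTypes.includes(dataType) &&
+         op.b.dataTypes.includes(dataType) &&
+         op.output.dataTypes.includes(dataType);
+}
+
+// MLSingleInputSupportLimits exposes per-operand limits named input and output.
+function unaryOpSupports(name, dataType) {
+  const op = limits[name];
+  return op.input.dataTypes.includes(dataType) &&
+         op.output.dataTypes.includes(dataType);
+}
+
+console.log(binaryOpSupports('add', 'float16'));
+console.log(unaryOpSupports('relu', 'float16'));
+</pre>
+</div>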
+ + ## {{MLGraph}} interface ## {#api-mlgraph} The {{MLGraph}} interface represents a compiled computational graph. A compiled graph once constructed is immutable and cannot be subsequently changed. @@ -1481,6 +1552,11 @@ partial interface MLGraphBuilder { MLOperand argMax(MLOperand input, [EnforceRange] unsigned long axis, optional MLArgMinMaxOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits argMin; + MLSingleInputSupportLimits argMax; +}; {{MLArgMinMaxOptions}} has the following members: @@ -1503,6 +1579,14 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The N-D tensor of the reduced shape. The values must be of type |options|.{{MLArgMinMaxOptions/outputDataType}} in the range [0, N-1] where N is the size of the input dimension specified by axis. +{{MLOpSupportLimits}} has following members for {{MLGraphBuilder/argMin()}} and {{MLGraphBuilder/argMax()}}: +
+ : argMin + :: Support limits for operator {{MLGraphBuilder/argMin()}}. + : argMax + :: Support limits for operator {{MLGraphBuilder/argMax()}}. +
+
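+<div class="example">
+A non-normative sketch of using these members to pick an {{MLArgMinMaxOptions/outputDataType}} that the context reports support for; the `builder` and `input` objects are assumed to exist, and the axis value is only an example.
+<pre highlight="js">
+const argMaxLimits = context.opSupportLimits().argMax;
+const outputDataType =
+    argMaxLimits.output.dataTypes.includes('int64') ? 'int64' : 'int32';
+const indices = builder.argMax(input, /*axis=*/ 1, {outputDataType});
+</pre>
+</div>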
To create argMin/argMax operation given [=string=] |op|, {{MLOperand}} |input|, {{unsigned long}} |axis|, and {{MLArgMinMaxOptions}} |options|, run the following steps: @@ -1556,6 +1640,19 @@ partial interface MLGraphBuilder { MLOperand batchNormalization(MLOperand input, MLOperand mean, MLOperand variance, optional MLBatchNormalizationOptions options = {}); }; + +dictionary MLBatchNormalizationSupportLimits { + MLSupportLimits input; + MLSupportLimits mean; + MLSupportLimits variance; + MLSupportLimits scale; + MLSupportLimits bias; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLBatchNormalizationSupportLimits batchNormalization; +}; {{MLBatchNormalizationOptions}} has the following members: @@ -1587,6 +1684,28 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The batch-normalized N-D tensor of the same shape as *input*. +{{MLBatchNormalizationSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : mean + :: {{MLSupportLimits}} for mean operand. + : variance + :: {{MLSupportLimits}} for variance operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/batchNormalization()}}: +
+ : batchNormalization + :: Support limits for operator {{MLGraphBuilder/batchNormalization()}}. +
+
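+<div class="example">
+Non-normative sketch: before building a reduced-precision graph, check that every {{MLGraphBuilder/batchNormalization()}} operand role accepts the data type; `"float16"` here is only an illustration.
+<pre highlight="js">
+const bn = context.opSupportLimits().batchNormalization;
+const roles = [bn.input, bn.mean, bn.variance, bn.scale, bn.bias, bn.output];
+const supportsFp16 = roles.every((role) => role.dataTypes.includes('float16'));
+</pre>
+</div>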
The batchNormalization(|input|, |mean|, |variance|, |options|) method steps are: @@ -1648,6 +1767,10 @@ partial interface MLGraphBuilder { MLOperandDataType type, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits cast; +};
**Arguments:** @@ -1658,6 +1781,11 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The N-D tensor of the same shape as *input* with each element casted to the target data type.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/cast()}}: +
+ : cast + :: Support limits for operator {{MLGraphBuilder/cast()}}. +
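+<div class="example">
+Non-normative sketch, assuming that the data types a context can cast to are reflected by the output member: fall back to `"float32"` when `"float16"` is not reported. The `builder` and `x` objects are assumed to exist.
+<pre highlight="js">
+const castLimits = context.opSupportLimits().cast;
+const targetType =
+    castLimits.output.dataTypes.includes('float16') ? 'float16' : 'float32';
+const converted = builder.cast(x, targetType);
+</pre>
+</div>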
Casting between {{MLOperandDataType}}s is specified for some cases and [=implementation-defined=] in other cases, according to the following table: @@ -1749,6 +1877,10 @@ dictionary MLClampOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand clamp(MLOperand input, optional MLClampOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits clamp; +}; {{MLClampOptions}} has the following members: @@ -1768,6 +1900,11 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/clamp()}}: +
+ : clamp + :: Support limits for operator {{MLGraphBuilder/clamp()}}. +
The clamp(|input|, |options|) method steps are: @@ -1826,6 +1963,15 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long axis, optional MLOperatorOptions options = {}); }; + +dictionary MLConcatSupportLimits { + MLSupportLimits inputs; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLConcatSupportLimits concat; +};
**Arguments:** @@ -1840,6 +1986,20 @@ partial interface MLGraphBuilder { computed as the sum of all the input sizes of the same dimension.
+{{MLConcatSupportLimits}} has the following members: +
+ : inputs + :: {{MLSupportLimits}} for all input operands. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/concat()}}: +
+ : concat + :: Support limits for operator {{MLGraphBuilder/concat()}}. +
+
The concat(|inputs|, |axis|, |options|) method steps are: @@ -1898,6 +2058,17 @@ partial interface MLGraphBuilder { MLOperand filter, optional MLConv2dOptions options = {}); }; + +dictionary MLConv2dSupportLimits { + MLSupportLimits input; + MLSupportLimits filter; + MLSupportLimits bias; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLConv2dSupportLimits conv2d; +}; {{MLConv2dOptions}} has the following members: @@ -1959,6 +2130,24 @@ partial interface MLGraphBuilder { `outputSize = 1 + (inputSize - (filterSize - 1) * dilation - 1 + beginningPadding + endingPadding) / stride` +{{MLConv2dSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : filter + :: {{MLSupportLimits}} for filter operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/conv2d()}}: +
+ : conv2d + :: Support limits for operator {{MLGraphBuilder/conv2d()}}. +
+
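+<div class="example">
+Non-normative sketch combining {{MLOpSupportLimits/preferredInputLayout}} with the {{MLGraphBuilder/conv2d()}} limits; the filter layout paired with each input layout is one possible convention, and `builder`, `input` and `filter` are assumed to exist.
+<pre highlight="js">
+const limits = context.opSupportLimits();
+const useNhwc = limits.preferredInputLayout === 'nhwc';
+const output = builder.conv2d(input, filter, {
+  inputLayout: useNhwc ? 'nhwc' : 'nchw',
+  // One possible filter layout to pair with each input layout.
+  filterLayout: useNhwc ? 'ohwi' : 'oihw',
+});
+</pre>
+</div>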
A *depthwise* conv2d operation is a variant of grouped convolution, used in models like the MobileNet, where the *options.groups* = inputChannels = outputChannels and the shape of filter tensor is *[options.groups, 1, height, width]* for {{MLConv2dFilterOperandLayout/"oihw"}} layout, *[height, width, 1, options.groups]* for {{MLConv2dFilterOperandLayout/"hwio"}} layout, *[options.groups, height, width, 1]* for {{MLConv2dFilterOperandLayout/"ohwi"}} layout and *[1, height, width, options.groups]* for {{MLConv2dFilterOperandLayout/"ihwo"}} layout. @@ -2097,6 +2286,10 @@ partial interface MLGraphBuilder { MLOperand convTranspose2d(MLOperand input, MLOperand filter, optional MLConvTranspose2dOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLConv2dSupportLimits convTranspose2d; +}; {{MLConvTranspose2dOptions}} has the following members: @@ -2173,6 +2366,12 @@ partial interface MLGraphBuilder { `outputSize = (inputSize - 1) * stride + (filterSize - 1) * dilation + 1 - beginningPadding - endingPadding + outputPadding`
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/convTranspose2d()}}: +
+ : convTranspose2d + :: Support limits for operator {{MLGraphBuilder/convTranspose2d()}}. +
+
To calculate convtranspose output size given unsigned integers |inputSize|, |filterSize|, |beginningPadding|, |endingPadding|, |stride|, |dilation|, and |outputPadding|, perform these steps. They return a number. @@ -2296,6 +2495,16 @@ partial interface MLGraphBuilder { MLOperand min(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); MLOperand pow(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLBinarySupportLimits add; + MLBinarySupportLimits sub; + MLBinarySupportLimits mul; + MLBinarySupportLimits div; + MLBinarySupportLimits max; + MLBinarySupportLimits min; + MLBinarySupportLimits pow; +};
@@ -2318,6 +2527,24 @@ partial interface MLGraphBuilder { - *pow*: Compute the values of the values of the first input tensor to the power of the values of the second input tensor, element-wise.
+{{MLOpSupportLimits}} has the following members for element-wise binary operations: +
+ : add + :: Support limits for operator {{MLGraphBuilder/add()}}. + : sub + :: Support limits for operator {{MLGraphBuilder/sub()}}. + : mul + :: Support limits for operator {{MLGraphBuilder/mul()}}. + : div + :: Support limits for operator {{MLGraphBuilder/div()}}. + : max + :: Support limits for operator {{MLGraphBuilder/max()}}. + : min + :: Support limits for operator {{MLGraphBuilder/min()}}. + : pow + :: Support limits for operator {{MLGraphBuilder/pow()}}. +
+
To create element-wise binary operation given [=string=] |op|, {{MLOperand}} |a|, {{MLOperand}} |b|, and {{MLOperatorOptions}} |options|, run the following steps: @@ -2416,6 +2643,20 @@ partial interface MLGraphBuilder { optional MLOperatorOptions options = {}); MLOperand logicalNot(MLOperand a, optional MLOperatorOptions options = {}); }; + +dictionary MLLogicalNotSupportLimits { + MLSupportLimits a; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLBinarySupportLimits equal; + MLBinarySupportLimits greater; + MLBinarySupportLimits greaterOrEqual; + MLBinarySupportLimits lesser; + MLBinarySupportLimits lesserOrEqual; + MLLogicalNotSupportLimits logicalNot; +};
@@ -2426,6 +2667,31 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output tensor that contains the result of element-wise comparison of the two input tensors.
+ +{{MLLogicalNotSupportLimits}} has the following members: +
+ : a + :: {{MLSupportLimits}} for a operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following members for element-wise logical operations: +
+ : equal + :: Support limits for operator {{MLGraphBuilder/equal()}}. + : greater + :: Support limits for operator {{MLGraphBuilder/greater()}}. + : greaterOrEqual + :: Support limits for operator {{MLGraphBuilder/greaterOrEqual()}}. + : lesser + :: Support limits for operator {{MLGraphBuilder/lesser()}}. + : lesserOrEqual + :: Support limits for operator {{MLGraphBuilder/lesserOrEqual()}}. + : logicalNot + :: Support limits for operator {{MLGraphBuilder/logicalNot()}}. +
+
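+<div class="example">
+Non-normative sketch: the comparison operators produce `"uint8"` outputs, so a caller might confirm that data type is reported for {{MLGraphBuilder/equal()}} before depending on it.
+<pre highlight="js">
+const equalLimits = context.opSupportLimits().equal;
+console.assert(equalLimits.output.dataTypes.includes('uint8'),
+               'equal() is expected to report uint8 outputs');
+</pre>
+</div>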
**Operation types:** - *equal*: Compare if the values of the two input tensors are equal, element-wise. @@ -2529,6 +2795,22 @@ partial interface MLGraphBuilder { MLOperand sqrt(MLOperand input, optional MLOperatorOptions options = {}); MLOperand tan(MLOperand input, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits abs; + MLSingleInputSupportLimits ceil; + MLSingleInputSupportLimits cos; + MLSingleInputSupportLimits erf; + MLSingleInputSupportLimits exp; + MLSingleInputSupportLimits floor; + MLSingleInputSupportLimits identity; + MLSingleInputSupportLimits log; + MLSingleInputSupportLimits neg; + MLSingleInputSupportLimits reciprocal; + MLSingleInputSupportLimits sin; + MLSingleInputSupportLimits sqrt; + MLSingleInputSupportLimits tan; +};
@@ -2541,6 +2823,36 @@ partial interface MLGraphBuilder { tensor is the same as the shape of input tensor.
+{{MLOpSupportLimits}} has the following members for element-wise unary operations: +
+ : abs + :: Support limits for operator {{MLGraphBuilder/abs()}}. + : ceil + :: Support limits for operator {{MLGraphBuilder/ceil()}}. + : cos + :: Support limits for operator {{MLGraphBuilder/cos()}}. + : erf + :: Support limits for operator {{MLGraphBuilder/erf()}}. + : exp + :: Support limits for operator {{MLGraphBuilder/exp()}}. + : floor + :: Support limits for operator {{MLGraphBuilder/floor()}}. + : identity + :: Support limits for operator {{MLGraphBuilder/identity()}}. + : log + :: Support limits for operator {{MLGraphBuilder/log()}}. + : neg + :: Support limits for operator {{MLGraphBuilder/neg()}}. + : reciprocal + :: Support limits for operator {{MLGraphBuilder/reciprocal()}}. + : sin + :: Support limits for operator {{MLGraphBuilder/sin()}}. + : sqrt + :: Support limits for operator {{MLGraphBuilder/sqrt()}}. + : tan + :: Support limits for operator {{MLGraphBuilder/tan()}}. +
+
**Operation types:** - *abs*: Compute the absolute value of the input tensor, element-wise. @@ -2682,6 +2994,10 @@ dictionary MLEluOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand elu(MLOperand input, optional MLEluOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits elu; +}; {{MLEluOptions}} has the following members: @@ -2699,6 +3015,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/elu()}}: +
+ : elu + :: Support limits for operator {{MLGraphBuilder/elu()}}. +
+
The elu(|input|, |options|) method steps are: @@ -2743,6 +3065,10 @@ partial interface MLGraphBuilder { sequence<[EnforceRange] unsigned long> newShape, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits expand; +};
**Arguments:** @@ -2753,6 +3079,12 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The tensor with expanded size dimensions.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/expand()}}: +
+ : expand + :: Support limits for operator {{MLGraphBuilder/expand()}}. +
+
The expand(|input|, |newShape|, |options|) method steps are: @@ -2783,6 +3115,16 @@ partial interface MLGraphBuilder { MLOperand indices, optional MLGatherOptions options = {}); }; + +dictionary MLGatherSupportLimits { + MLSupportLimits input; + MLSupportLimits indices; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLGatherSupportLimits gather; +}; {{MLGatherOptions}} has the following members: @@ -2801,6 +3143,22 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output N-D tensor of [=MLOperand/rank=] equal to the [=MLOperand/rank=] of *input* + the [=MLOperand/rank=] of *indices* - 1.
+{{MLGatherSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : indices + :: {{MLSupportLimits}} for indices operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gather()}}: +
+ : gather + :: Support limits for operator {{MLGraphBuilder/gather()}}. +
+
The {{MLGraphBuilder/gather(input, indices, options)/indices}} parameter to {{MLGraphBuilder/gather()}} can not be clamped to the allowed range when the graph is built because the inputs are not known until execution. Implementations can introduce {{MLGraphBuilder/clamp()}} in the compiled graph if the required clamping behavior is not provided by the underlying platform. Similarly, if the underlying platform does not support negative indices, the implementation can introduce operations in the compiled graph to transform a negative index from the end of the dimension into a positive index.
@@ -2907,6 +3265,10 @@ Compute the @@ -2918,6 +3280,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/gelu()}}: +
+ : gelu + :: Support limits for operator {{MLGraphBuilder/gelu()}}. +
+
The gelu(|input|, |options|) method steps are: @@ -2967,6 +3335,17 @@ dictionary MLGemmOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand gemm(MLOperand a, MLOperand b, optional MLGemmOptions options = {}); }; + +dictionary MLGemmSupportLimits { + MLSupportLimits a; + MLSupportLimits b; + MLSupportLimits c; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLGemmSupportLimits gemm; +}; {{MLGemmOptions}} has the following members: @@ -3001,6 +3380,24 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output 2-D tensor of shape *[M, N]* that contains the calculated product of all the inputs. +{{MLGemmSupportLimits}} has following members: +
+ : a + :: {{MLSupportLimits}} for a operand. + : b + :: {{MLSupportLimits}} for b operand. + : c + :: {{MLSupportLimits}} for c operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gemm()}}: +
+ : gemm + :: Support limits for operator {{MLGraphBuilder/gemm()}}. +
+
The gemm(|a|, |b|, |options|) method steps are: @@ -3096,6 +3493,20 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long hiddenSize, optional MLGruOptions options = {}); }; + +dictionary MLGruSupportLimits { + MLSupportLimits input; + MLSupportLimits weight; + MLSupportLimits recurrentWeight; + MLSupportLimits bias; + MLSupportLimits recurrentBias; + MLSupportLimits initialHiddenState; + MLSupportLimits outputs; +}; + +partial dictionary MLOpSupportLimits { + MLGruSupportLimits gru; +}; {{MLGruOptions}} has the following members: @@ -3146,6 +3557,30 @@ partial interface MLGraphBuilder { **Returns:** [=sequence=]<{{MLOperand}}>. The first element is a 3-D tensor of shape *[numDirections, batchSize, hiddenSize]*, the cell output from the last time step of the network. Additionally, if |options|.{{MLGruOptions/returnSequence}} is set to true, the second element is the 4-D output tensor of shape *[steps, numDirections, batchSize, hiddenSize]* containing every cell outputs from each time step in the temporal sequence. +{{MLGruSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : weight + :: {{MLSupportLimits}} for weight operand. + : recurrentWeight + :: {{MLSupportLimits}} for recurrentWeight operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : recurrentBias + :: {{MLSupportLimits}} for recurrentBias operand. + : initialHiddenState + :: {{MLSupportLimits}} for initialHiddenState operand. + : outputs + :: {{MLSupportLimits}} for all the output operands. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gru()}}: +
+ : gru + :: Support limits for operator {{MLGraphBuilder/gru()}}. +
+
The gru(|input|, |weight|, |recurrentWeight|, |steps|, |hiddenSize|, |options|) method steps are: @@ -3328,6 +3763,20 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long hiddenSize, optional MLGruCellOptions options = {}); }; + +dictionary MLGruCellSupportLimits { + MLSupportLimits input; + MLSupportLimits weight; + MLSupportLimits recurrentWeight; + MLSupportLimits hiddenState; + MLSupportLimits bias; + MLSupportLimits recurrentBias; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLGruCellSupportLimits gruCell; +}; {{MLGruCellOptions}} has the following members: @@ -3365,6 +3814,30 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The 2-D tensor of shape *[batchSize, hiddenSize]*, the cell output hidden state of a single time step of the recurrent network. +{{MLGruCellSupportLimits}} has following members; +
+ : input + :: {{MLSupportLimits}} for input operand. + : weight + :: {{MLSupportLimits}} for weight operand. + : recurrentWeight + :: {{MLSupportLimits}} for recurrentWeight operand. + : hiddenState + :: {{MLSupportLimits}} for hiddenState operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : recurrentBias + :: {{MLSupportLimits}} for recurrentBias operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/gruCell()}}: +
+ : gruCell + :: Support limits for operator {{MLGraphBuilder/gruCell()}}. +
+
The gruCell(|input|, |weight|, |recurrentWeight|, |hiddenState|, |hiddenSize|, |options|) method steps are: @@ -3523,6 +3996,10 @@ dictionary MLHardSigmoidOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand hardSigmoid(MLOperand input, optional MLHardSigmoidOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits hardSigmoid; +}; {{MLHardSigmoidOptions}} has the following members: @@ -3544,6 +4021,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/hardSigmoid()}}: +
+ : hardSigmoid + :: Support limits for operator {{MLGraphBuilder/hardSigmoid()}}. +
+
The hardSigmoid(|input|, |options|) method steps are: @@ -3587,6 +4070,10 @@ Computes the nonlinear function `y = x * max(0, min(6, (x + 3))) / 6` that is in partial interface MLGraphBuilder { MLOperand hardSwish(MLOperand input, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits hardSwish; +};
@@ -3598,6 +4085,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/hardSwish()}}: +
+ : hardSwish + :: Support limits for operator {{MLGraphBuilder/hardSwish()}}. +
+
The hardSwish(|input|, |options|) method steps are: @@ -3650,6 +4143,17 @@ partial interface MLGraphBuilder { MLOperand instanceNormalization(MLOperand input, optional MLInstanceNormalizationOptions options = {}); }; + +dictionary MLNormalizationSupportLimits { + MLSupportLimits input; + MLSupportLimits scale; + MLSupportLimits bias; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLNormalizationSupportLimits instanceNormalization; +}; {{MLInstanceNormalizationOptions}} has the following members: @@ -3680,6 +4184,24 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The instance-normalized 4-D tensor of the same shape as *input*. +{{MLNormalizationSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : scale + :: {{MLSupportLimits}} for scale operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/instanceNormalization()}}: +
+ : instanceNormalization + :: Support limits for operator {{MLGraphBuilder/instanceNormalization()}}. +
+
The instanceNormalization(|input|, |options|) method steps are: @@ -3753,6 +4275,10 @@ partial interface MLGraphBuilder { MLOperand layerNormalization(MLOperand input, optional MLLayerNormalizationOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLNormalizationSupportLimits layerNormalization; +}; {{MLLayerNormalizationOptions}} has the following members: @@ -3781,6 +4307,12 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The layer-normalized N-D tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/layerNormalization()}}: +
+ : layerNormalization + :: Support limits for operator {{MLGraphBuilder/layerNormalization()}}. +
+
The layerNormalization(|input|, |options|) method steps are: @@ -3858,6 +4390,10 @@ dictionary MLLeakyReluOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand leakyRelu(MLOperand input, optional MLLeakyReluOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits leakyRelu; +}; {{MLLeakyReluOptions}} has the following members: @@ -3876,6 +4412,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/leakyRelu()}}: +
+ : leakyRelu + :: Support limits for operator {{MLGraphBuilder/leakyRelu()}}. +
+
The leakyRelu(|input|, |options|) method steps are: @@ -3922,6 +4464,10 @@ dictionary MLLinearOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand linear(MLOperand input, optional MLLinearOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits linear; +}; {{MLLinearOptions}} has the following members: @@ -3943,6 +4489,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/linear()}}: +
+ : linear + :: Support limits for operator {{MLGraphBuilder/linear()}}. +
+
The linear(|input|, |options|) method steps are: @@ -4005,6 +4557,23 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long hiddenSize, optional MLLstmOptions options = {}); }; + +dictionary MLLstmSupportLimits { + MLSupportLimits input; + MLSupportLimits weight; + MLSupportLimits recurrentWeight; + MLSupportLimits bias; + MLSupportLimits recurrentBias; + MLSupportLimits peepholeWeight; + MLSupportLimits initialHiddenState; + MLSupportLimits initialCellState; + MLSupportLimits outputs; +}; + +partial dictionary MLOpSupportLimits { + MLLstmSupportLimits lstm; +}; + {{MLLstmOptions}} has the following members: @@ -4058,6 +4627,34 @@ partial interface MLGraphBuilder { **Returns:** [=sequence=]<{{MLOperand}}>. The first element is a 3-D tensor of shape *[numDirections, batchSize, hiddenSize]*, the output hidden state from the last time step of the network. The second element is a 3-D tensor of shape *[numDirections, batchSize, hiddenSize]*, the output cell state from the last time step of the network. Additionally, if |options|.{{MLLstmOptions/returnSequence}} is set to true, the third element is the 4-D output tensor of shape *[steps, numDirections, batchSize, hiddenSize]* containing every output from each time step in the temporal sequence. +{{MLLstmSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : weight + :: {{MLSupportLimits}} for weight operand. + : recurrentWeight + :: {{MLSupportLimits}} for recurrentWeight operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : recurrentBias + :: {{MLSupportLimits}} for recurrentBias operand. + : peepholeWeight + :: {{MLSupportLimits}} for peepholeWeight operand. + : initialHiddenState + :: {{MLSupportLimits}} for initialHiddenState operand. + : initialCellState + :: {{MLSupportLimits}} for initialCellState operand. + : outputs + :: {{MLSupportLimits}} for all the output operands. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/lstm()}}: +
+ : lstm + :: Support limits for operator {{MLGraphBuilder/lstm()}}. +
+
The lstm(|input|, |weight|, |recurrentWeight|, |steps|, |hiddenSize|, |options|) method steps are: @@ -4278,6 +4875,22 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long hiddenSize, optional MLLstmCellOptions options = {}); }; + +dictionary MLLstmCellSupportLimits { + MLSupportLimits input; + MLSupportLimits weight; + MLSupportLimits recurrentWeight; + MLSupportLimits hiddenState; + MLSupportLimits cellState; + MLSupportLimits bias; + MLSupportLimits recurrentBias; + MLSupportLimits peepholeWeight; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLLstmCellSupportLimits lstmCell; +}; {{MLLstmCellOptions}} has the following members: @@ -4316,6 +4929,34 @@ partial interface MLGraphBuilder { **Returns:** [=sequence=]<{{MLOperand}}>. The first element is the output hidden state of the current time step of the recurrent network. The following element is the output cell state. Both elements are 2-D tensors of shape *[batchSize, hiddenSize]*. +{{MLLstmCellSupportLimits}} has following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : weight + :: {{MLSupportLimits}} for weight operand. + : recurrentWeight + :: {{MLSupportLimits}} for recurrentWeight operand. + : hiddenState + :: {{MLSupportLimits}} for hiddenState operand. + : cellState + :: {{MLSupportLimits}} for cellState operand. + : bias + :: {{MLSupportLimits}} for bias operand. + : recurrentBias + :: {{MLSupportLimits}} for recurrentBias operand. + : peepholeWeight + :: {{MLSupportLimits}} for peepholeWeight operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/lstmCell()}}: +
+ : lstmCell + :: Support limits for operator {{MLGraphBuilder/lstmCell()}}. +
+
The lstmCell(|input|, |weight|, |recurrentWeight|, |hiddenState|, |cellState|, |hiddenSize|, |options|) method steps are: @@ -4500,6 +5141,10 @@ Compute the matrix product of two input tensors. partial interface MLGraphBuilder { MLOperand matmul(MLOperand a, MLOperand b, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLBinarySupportLimits matmul; +};
@@ -4518,6 +5163,12 @@ partial interface MLGraphBuilder { - If either *a* or *b* is `N`-dimensional where `N > 2`, it is treated as a stack of matrices with dimensions corresponding to the last two indices. The matrix multiplication will be [=broadcast=] according to [[!numpy-broadcasting-rule]]. The shapes of *a* and *b*, except the last two dimensions, must be [=bidirectionally broadcastable=]. The output is a `N`-dimensional tensor whose rank is the maximum [=MLOperand/rank=] of the input tensors. For each dimension, except the last two, of the output tensor, its size is the maximum size along that dimension of the input tensors.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/matmul()}}: +
+ : matmul + :: Support limits for operator {{MLGraphBuilder/matmul()}}. +
+
To calculate matmul output sizes, given {{MLOperand}} |a| and {{MLOperand}} |b| run the following steps: @@ -4580,6 +5231,10 @@ partial interface MLGraphBuilder { sequence<[EnforceRange] unsigned long> endingPadding, optional MLPadOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits pad; +}; {{MLPadOptions}} has the following members: @@ -4605,6 +5260,12 @@ partial interface MLGraphBuilder { `output size = beginning padding + input size + ending padding` +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/pad()}}: +
+ : pad + :: Support limits for operator {{MLGraphBuilder/pad()}}. +
+
To calculate padding output sizes, given |input|, |beginningPadding| and |endingPadding|, run the following steps: @@ -4706,6 +5367,12 @@ partial interface MLGraphBuilder { MLOperand l2Pool2d(MLOperand input, optional MLPool2dOptions options = {}); MLOperand maxPool2d(MLOperand input, optional MLPool2dOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits averagePool2d; + MLSingleInputSupportLimits l2Pool2d; + MLSingleInputSupportLimits maxPool2d; +}; {{MLPool2dOptions}} has the following members: @@ -4772,6 +5439,16 @@ partial interface MLGraphBuilder { `output size = ceil(1 + (input size - filter size + beginning padding + ending padding) / stride)` +{{MLOpSupportLimits}} has following members for pooling operations: +
+ : averagePool2d + :: Support limits for operator {{MLGraphBuilder/averagePool2d()}}. + : l2Pool2d + :: Support limits for operator {{MLGraphBuilder/l2Pool2d()}}. + : maxPool2d + :: Support limits for operator {{MLGraphBuilder/maxPool2d()}}. +
+
A *global* pooling operation such as one for the max pooling operation is a variant of pooling where the window dimensions is the spatial dimensions (last two dimensions) of the input shape, as follows.
@@ -4907,6 +5584,16 @@ partial interface MLGraphBuilder {
                   MLOperand slope,
                   optional MLOperatorOptions options = {});
 };
+
+dictionary MLPreluSupportLimits {
+  MLSupportLimits input;
+  MLSupportLimits slope;
+  MLSupportLimits output;
+};
+
+partial dictionary MLOpSupportLimits {
+  MLPreluSupportLimits prelu;
+};
 
 
 
@@ -4919,6 +5606,22 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLPreluSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : slope + :: {{MLSupportLimits}} for slope operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/prelu()}}: +
+ : prelu + :: Support limits for operator {{MLGraphBuilder/prelu()}}. +
+
The prelu(|input|, |slope|, |options|) method steps are: @@ -4976,6 +5679,19 @@ partial interface MLGraphBuilder { MLOperand reduceSum(MLOperand input, optional MLReduceOptions options = {}); MLOperand reduceSumSquare(MLOperand input, optional MLReduceOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits reduceL1; + MLSingleInputSupportLimits reduceL2; + MLSingleInputSupportLimits reduceLogSum; + MLSingleInputSupportLimits reduceLogSumExp; + MLSingleInputSupportLimits reduceMax; + MLSingleInputSupportLimits reduceMean; + MLSingleInputSupportLimits reduceMin; + MLSingleInputSupportLimits reduceProduct; + MLSingleInputSupportLimits reduceSum; + MLSingleInputSupportLimits reduceSumSquare; +}; {{MLReduceOptions}} has the following members: @@ -5003,6 +5719,30 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The reduced output tensor. If the input operand is a scalar, the reduction function is applied to the scalar value, and the output is also a scalar.
+{{MLOpSupportLimits}} has the following members for reduction operations: +
+ : reduceL1 + :: Support limits for operator {{MLGraphBuilder/reduceL1()}}. + : reduceL2 + :: Support limits for operator {{MLGraphBuilder/reduceL2()}}. + : reduceLogSum + :: Support limits for operator {{MLGraphBuilder/reduceLogSum()}}. + : reduceLogSumExp + :: Support limits for operator {{MLGraphBuilder/reduceLogSumExp()}}. + : reduceMax + :: Support limits for operator {{MLGraphBuilder/reduceMax()}}. + : reduceMean + :: Support limits for operator {{MLGraphBuilder/reduceMean()}}. + : reduceMin + :: Support limits for operator {{MLGraphBuilder/reduceMin()}}. + : reduceProduct + :: Support limits for operator {{MLGraphBuilder/reduceProduct()}}. + : reduceSum + :: Support limits for operator {{MLGraphBuilder/reduceSum()}}. + : reduceSumSquare + :: Support limits for operator {{MLGraphBuilder/reduceSumSquare()}}. +
+
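+<div class="example">
+Non-normative sketch: iterate over the reduction members to find which reduction operators accept `"float16"` inputs on this context.
+<pre highlight="js">
+const limits = context.opSupportLimits();
+const reductions = [
+  'reduceL1', 'reduceL2', 'reduceLogSum', 'reduceLogSumExp', 'reduceMax',
+  'reduceMean', 'reduceMin', 'reduceProduct', 'reduceSum', 'reduceSumSquare',
+];
+const fp16Reductions = reductions.filter(
+    (name) => limits[name].input.dataTypes.includes('float16'));
+console.log(`float16 reductions: ${fp16Reductions.join(', ')}`);
+</pre>
+</div>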
**Reduction types:** - *L1*: Compute the L1 norm, the sum of the absolute value of the input values. @@ -5161,6 +5901,10 @@ Compute the partial interface MLGraphBuilder { MLOperand relu(MLOperand input, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits relu; +};
@@ -5172,6 +5916,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/relu()}}: +
+ : relu + :: Support limits for operator {{MLGraphBuilder/relu()}}. +
+
The relu(|input|, |options|) method steps are: @@ -5219,6 +5969,10 @@ dictionary MLResample2dOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand resample2d(MLOperand input, optional MLResample2dOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits resample2d; +};
**Arguments:** @@ -5252,6 +6006,12 @@ partial interface MLGraphBuilder { The default value is [2, 3]. +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/resample2d()}}: +
+ : resample2d + :: Support limits for operator {{MLGraphBuilder/resample2d()}}. +
+
To check resample options given |options| and |input|, run the following steps: @@ -5305,6 +6065,10 @@ partial interface MLGraphBuilder { sequence<[EnforceRange] unsigned long> newShape, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits reshape; +};
**Arguments:** @@ -5319,6 +6083,12 @@ partial interface MLGraphBuilder { tensor is specified by the *newShape* argument.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/reshape()}}: +
+ : reshape + :: Support limits for operator {{MLGraphBuilder/reshape()}}. +
+
The reshape(|input|, |newShape|, |options|) method steps are: @@ -5348,6 +6118,10 @@ Compute the sigmoid fun partial interface MLGraphBuilder { MLOperand sigmoid(MLOperand input, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits sigmoid; +};
@@ -5359,6 +6133,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/sigmoid()}}: +
+ : sigmoid + :: Support limits for operator {{MLGraphBuilder/sigmoid()}}. +
+
The sigmoid(|input|, |options|) method steps are: @@ -5400,6 +6180,10 @@ partial interface MLGraphBuilder { sequence<[EnforceRange] unsigned long> sizes, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits slice; +};
**Arguments:** @@ -5411,6 +6195,12 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output tensor of the same rank as the input tensor with tensor values stripped to the specified starting and ending indices in each dimension.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/slice()}}: +
+ : slice + :: Support limits for operator {{MLGraphBuilder/slice()}}. +
+
The slice(|input|, |starts|, |sizes|, |options|) method steps are: @@ -5444,6 +6234,10 @@ partial interface MLGraphBuilder { [EnforceRange] unsigned long axis, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits softmax; +};
@@ -5456,6 +6250,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output N-D tensor that contains the softmax results, of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softmax()}}: +
+ : softmax + :: Support limits for operator {{MLGraphBuilder/softmax()}}. +
+
The softmax(|input|, |axis|, |options|) method steps are: @@ -5502,6 +6302,10 @@ Compute the @@ -5513,6 +6317,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softplus()}}: +
+ : softplus + :: Support limits for operator {{MLGraphBuilder/softplus()}}. +
+
The softplus(|input|, |options|) method steps are: @@ -5549,6 +6359,10 @@ Compute the @@ -5575,6 +6389,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/softsign()}}: +
+ : softsign + :: Support limits for operator {{MLGraphBuilder/softsign()}}. +
+
The softsign(|input|, |options|) method steps are: @@ -5604,6 +6424,15 @@ partial interface MLGraphBuilder { ([EnforceRange] unsigned long or sequence<[EnforceRange] unsigned long>) splits, optional MLSplitOptions options = {}); }; + +dictionary MLSplitSupportLimits { + MLSupportLimits input; + MLSupportLimits outputs; +}; + +partial dictionary MLOpSupportLimits { + MLSplitSupportLimits split; +};
@@ -5622,6 +6451,21 @@ partial interface MLGraphBuilder { The dimension along which to split. Its value must be in the range [0, N-1] where N is the [=MLOperand/rank=] of the input tensor. + +{{MLSplitSupportLimits}} has the following members: +
+ : input + :: {{MLSupportLimits}} for input operand. + : outputs + :: {{MLSupportLimits}} for all the output operands. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/split()}}: +
+ : split + :: Support limits for operator {{MLGraphBuilder/split()}}. +
+
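+<div class="example">
+Non-normative sketch: {{MLSplitSupportLimits}} reports a single outputs entry that applies to every output of {{MLGraphBuilder/split()}}; the data type checked is only an example.
+<pre highlight="js">
+const splitLimits = context.opSupportLimits().split;
+const fp16Split = splitLimits.input.dataTypes.includes('float16') &&
+                  splitLimits.outputs.dataTypes.includes('float16');
+</pre>
+</div>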
The split(|input|, |splits|, |options|) method steps are: @@ -5687,6 +6531,10 @@ Compute the hyperbo partial interface MLGraphBuilder { MLOperand tanh(MLOperand input, optional MLOperatorOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits tanh; +};
@@ -5698,6 +6546,12 @@ partial interface MLGraphBuilder { - an {{MLOperand}}. The output tensor of the same shape as *input*.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/tanh()}}: +
+ : tanh + :: Support limits for operator {{MLGraphBuilder/tanh()}}. +
+
The tanh(|input|, |options|) method steps are: @@ -5743,6 +6597,10 @@ dictionary MLTransposeOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand transpose(MLOperand input, optional MLTransposeOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits transpose; +}; {{MLTransposeOptions}} has the following members: @@ -5762,6 +6620,12 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The permuted or transposed N-D tensor.
+{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/transpose()}}: +
+ : transpose + :: Support limits for operator {{MLGraphBuilder/transpose()}}. +
+
The transpose(|input|, |options|) method steps are: @@ -5794,6 +6658,10 @@ dictionary MLTriangularOptions : MLOperatorOptions { partial interface MLGraphBuilder { MLOperand triangular(MLOperand input, optional MLTriangularOptions options = {}); }; + +partial dictionary MLOpSupportLimits { + MLSingleInputSupportLimits triangular; +}; {{MLTriangularOptions}} has the following members: @@ -5814,6 +6682,12 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output tensor representing a triangular matrix, or batch of matrices which is the same shape as the input. +{{MLOpSupportLimits}} has following member for {{MLGraphBuilder/triangular()}}: +
+ : triangular + :: Support limits for operator {{MLGraphBuilder/triangular()}}. +
+
The triangular(|input|, |options|) method steps are: @@ -5903,6 +6777,17 @@ partial interface MLGraphBuilder { MLOperand falseValue, optional MLOperatorOptions options = {}); }; + +dictionary MLWhereSupportLimits { + MLSupportLimits condition; + MLSupportLimits trueValue; + MLSupportLimits falseValue; + MLSupportLimits output; +}; + +partial dictionary MLOpSupportLimits { + MLWhereSupportLimits where; +};
@@ -5915,6 +6800,25 @@ partial interface MLGraphBuilder { **Returns:** an {{MLOperand}}. The output tensor that contains the values selected element-wise from either the trueValue or the falseValue tensor.
+{{MLWhereSupportLimits}} has the following members: +
+ : condition + :: {{MLSupportLimits}} for condition operand. + : trueValue + :: {{MLSupportLimits}} for trueValue operand. + : falseValue + :: {{MLSupportLimits}} for falseValue operand. + : output + :: {{MLSupportLimits}} for output operand. +
+ +{{MLOpSupportLimits}} has the following member for {{MLGraphBuilder/where()}}: +
+ : where + :: Support limits for operator {{MLGraphBuilder/where()}}. +
+ +
The where(|condition|, |trueValue|, |falseValue|, |options|) method steps are: