Implement more reduction #44
@@ -1,7 +1,9 @@
 'use strict';

+import {pow} from './binary.js';
 import {squeeze} from './squeeze.js';
-import {sizeOfShape, Tensor} from './lib/tensor.js';
+import {abs, exp, log} from './unary.js';
+import {sizeOfShape, Scalar, Tensor} from './lib/tensor.js';
 import {validateReduceParams} from './lib/validate-input.js';

 /**
@@ -16,9 +18,6 @@ function reduce(input, reduceFunc, {keepDimensions = false, axes} = {}) {

   const outputShape = input.shape.slice();
   for (let i = 0; i < inpAxes.length; ++i) {
-    if (inpAxes[i] === -1) {
-      inpAxes[i] = input.rank - 1;
-    }
     outputShape[inpAxes[i]] = 1;
   }

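Editor's note, not part of the diff: with the negative-axis normalization removed from reduce, the assumption is that callers (or higher-level frameworks) resolve negative axes themselves before invoking the reduction helpers. A minimal sketch of that resolution, with illustrative names only:

// Resolve framework-style negative axes into the non-negative axes the reducer now expects.
const input = {rank: 3};        // stand-in for a rank-3 Tensor
const requestedAxes = [-1];     // framework-level convention: -1 means the last axis
const resolvedAxes = requestedAxes.map((a) => (a >= 0 ? a : a + input.rank)); // [2]
// resolvedAxes is what would now be passed as options.axes.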
@@ -123,3 +122,60 @@ export function reduceSum(input, options = {}) {
   return reduce(input,
       (previousValue, currentValue) => previousValue + currentValue, options);
 }
+
+/**
+ * Compute the sum of the square of all the input values along the axes.
+ * @param {Tensor} input
+ * @param {MLReduceOptions} options
+ * @return {Tensor}
+ */
+export function reduceSumSquare(input, options = {}) {
+  return reduceSum(pow(input, new Scalar(2)), options);

Review comment: Formulas all look correct, but wasn't there a GitHub issue with ReduceL2 ambiguity? The formula I recall was ReduceL2 = sqrt(sum(x_i^2)). (btw, I always have to re-look up the formulas because I keep thinking "reduceSumSquare" means the square of the sum rather than the sum of the squares.)

Reply: Are there other explanations for ReduceL2 besides sqrt(sum(x_i^2))? Reference: https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm

Reply: Oh, never mind, it was actually L2 pooling, not reduction: webmachinelearning/webnn#278
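Editor's note, not part of the PR thread: a minimal sketch of the three quantities being mixed up, in plain JavaScript on a tiny example (all names here are illustrative):

// reduceSumSquare sums the squared elements; reduceL2 is the square root of that sum.
const x = [3, 4];
const sumSquare = x.reduce((acc, v) => acc + v * v, 0);            // 3*3 + 4*4 = 25
const l2 = Math.sqrt(sumSquare);                                   // sqrt(25) = 5
const squareOfSum = Math.pow(x.reduce((acc, v) => acc + v, 0), 2); // (3 + 4)^2 = 49, the easy mix-up
console.log(sumSquare, l2, squareOfSum);                           // 25 5 49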
+}
+
+/**
+ * Compute the L1 norm of all the input values along the axes.
+ * @param {Tensor} input
+ * @param {MLReduceOptions} options
+ * @return {Tensor}
+ */
+export function reduceL1(input, options = {}) {
+  return reduceSum(abs(input), options);
+}
+
+/**
+ * Compute the L2 norm of all the input values along the axes.
+ * @param {Tensor} input
+ * @param {MLReduceOptions} options
+ * @return {Tensor}
+ */
+export function reduceL2(input, options = {}) {
+  const intermediateResult = reduceSumSquare(input, options);
+  if (intermediateResult.rank === 0) {
+    return new Tensor(
+        [],
+        [Math.pow(intermediateResult.getValueByIndex(0), 0.5)]);
+  } else {
+    return pow(intermediateResult, new Scalar(0.5));
+  }
+}
+
+/**
+ * Compute the log value of the sum of all the input values along the axes.
+ * @param {Tensor} input
+ * @param {MLReduceOptions} options
+ * @return {Tensor}
+ */
+export function reduceLogSum(input, options = {}) {
+  return log(reduceSum(input, options));
+}
+
+/**
+ * Compute the log value of the sum of the exponent of all the input values along the axes.
+ * @param {Tensor} input
+ * @param {MLReduceOptions} options
+ * @return {Tensor}
+ */
+export function reduceLogSumExp(input, options = {}) {
+  return log(reduceSum(exp(input), options));
+}
@@ -20,7 +20,7 @@ export function slice(input, starts, sizes, {axes} = {}) {
   const axesLen = axes.length;
   const outputShape = input.shape.slice();
   for (let i = 0; i < axesLen; ++i) {
-    const axis = axes[i] >= 0 ? axes[i] : axes[i] + rank;
+    const axis = axes[i];

Review comment: Aah, nice, we can simplify the testing and implementations after Ningxin's removal of the negative axes policy.

     const size = input.shape[axis];
     const start = starts[i];
     startsForAllAxes[axis] = start >= 0 ? start : start + size;

Review comment: 🤔 Ningxin's PR removed the special handling of negative numbers related to axes, but we still have this special case with axes here, and with reshape. I wonder if this too should be a policy resolved by higher-level frameworks first, so they just pass the resolved shape before it reaches the WebNN API (or does keeping reshape's special null-axis handling make the composition of other WebNN ops simpler, like your instanceNormalization implementation, which utilizes reshape?). *This comment is not blocking either way.

Reply: Sorry, I don't quite follow your point; could you please explain how to simplify the implementation? Also, I submitted a PR to the spec, webmachinelearning/webnn#367; please take a look as well, thanks.

Reply: 👍 Commented on the other PR.
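Editor's note, not part of the PR thread: a minimal sketch of the remaining special case being discussed in slice, where a negative start index along a sliced axis is resolved against that axis's size (the values here are illustrative):

// Resolve a negative start index the same way the line above does.
const size = 5;        // hypothetical dimension size along the sliced axis
const start = -2;      // caller-supplied start index
const resolved = start >= 0 ? start : start + size;
console.log(resolved); // 3, i.e. the slice begins two elements from the end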