diff --git a/face_recognition/facenet_nchw.js b/face_recognition/facenet_nchw.js index 55de74ca..87b3e95d 100644 --- a/face_recognition/facenet_nchw.js +++ b/face_recognition/facenet_nchw.js @@ -52,8 +52,8 @@ export class FaceNetNchw { if (options.autoPad == 'same-upper') { options.padding = computePadding2DForAutoPad( - /* nchw */[input.shape()[2], input.shape()[3]], - /* oihw */[weights.shape()[2], weights.shape()[3]], + /* nchw */[input.shape[2], input.shape[3]], + /* oihw */[weights.shape[2], weights.shape[3]], options.strides, options.dilations, options.autoPad); } const conv2d = this.builder_.conv2d(input, weights, options); @@ -266,7 +266,7 @@ export class FaceNetNchw { const averagePool = this.builder_.averagePool2d(await block8_6); // Use reshape to implement squeeze(averagePool, {axes: [2, 3]}); - const squeezed_shape = averagePool.shape(); + const squeezed_shape = averagePool.shape; squeezed_shape.splice(2, 2); const squeeze = this.builder_.reshape(averagePool, squeezed_shape); const gemm = await this.buildGemm_(squeeze); diff --git a/face_recognition/facenet_nhwc.js b/face_recognition/facenet_nhwc.js index 12cf1544..c8d1cd4c 100644 --- a/face_recognition/facenet_nhwc.js +++ b/face_recognition/facenet_nhwc.js @@ -54,8 +54,8 @@ export class FaceNetNhwc { if (options.autoPad == 'same-upper') { options.padding = computePadding2DForAutoPad( - /* nwhc */[input.shape()[1], input.shape()[2]], - /* ohwi */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[input.shape[1], input.shape[2]], + /* ohwi */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, options.autoPad); } const conv2d = this.builder_.conv2d(input, weights, options); diff --git a/facial_landmark_detection/ssd_mobilenetv2_face_nchw.js b/facial_landmark_detection/ssd_mobilenetv2_face_nchw.js index 9c146331..fc2e4396 100644 --- a/facial_landmark_detection/ssd_mobilenetv2_face_nchw.js +++ b/facial_landmark_detection/ssd_mobilenetv2_face_nchw.js @@ -69,8 +69,8 @@ 
${nameArray[1]}`; const weights = buildConstantByNpy(this.builder_, weightsName); const biasName = prefix + biasSuffix; const bias = buildConstantByNpy(this.builder_, biasName); - const inputShape = (await input).shape(); - const weightsShape = (await weights).shape(); + const inputShape = (await input).shape; + const weightsShape = (await weights).shape; options.padding = computePadding2DForAutoPad( /* nchw */[inputShape[2], inputShape[3]], /* oihw */[weightsShape[2], weightsShape[3]], diff --git a/facial_landmark_detection/ssd_mobilenetv2_face_nhwc.js b/facial_landmark_detection/ssd_mobilenetv2_face_nhwc.js index c2cc6b7f..3acadb12 100644 --- a/facial_landmark_detection/ssd_mobilenetv2_face_nhwc.js +++ b/facial_landmark_detection/ssd_mobilenetv2_face_nhwc.js @@ -82,8 +82,8 @@ ${nameArray[1]}`; options.filterLayout = 'ihwo'; } options.bias = await bias; - const inputShape = (await input).shape(); - const weightsShape = (await weights).shape(); + const inputShape = (await input).shape; + const weightsShape = (await weights).shape; options.padding = computePadding2DForAutoPad( /* nhwc */[inputShape[1], inputShape[2]], /* ohwi or ihwo */[weightsShape[1], weightsShape[2]], diff --git a/image_classification/mobilenet_nhwc.js b/image_classification/mobilenet_nhwc.js index 3fc89bba..51e8d7e8 100644 --- a/image_classification/mobilenet_nhwc.js +++ b/image_classification/mobilenet_nhwc.js @@ -36,8 +36,8 @@ export class MobileNetV2Nhwc { if (options.autoPad == 'same-upper') { options.padding = computePadding2DForAutoPad( - /* nwhc */[await input.shape()[1], await input.shape()[2]], - /* ohwi or ihwo */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[await input.shape[1], await input.shape[2]], + /* ohwi or ihwo */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, options.autoPad); } const conv2d = this.builder_.conv2d(await input, weights, options); diff --git a/image_classification/resnet50v2_nhwc.js b/image_classification/resnet50v2_nhwc.js 
index 0988e6b2..b9d89073 100644 --- a/image_classification/resnet50v2_nhwc.js +++ b/image_classification/resnet50v2_nhwc.js @@ -54,8 +54,8 @@ export class ResNet50V2Nhwc { if (options.autoPad == 'same-upper') { options.padding = computePadding2DForAutoPad( - /* nwhc */[await input.shape()[1], await input.shape()[2]], - /* ohwi */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[await input.shape[1], await input.shape[2]], + /* ohwi */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, options.autoPad); } const conv2d = this.builder_.conv2d(await input, weights, options); @@ -144,7 +144,7 @@ export class ResNet50V2Nhwc { const pool = this.builder_.maxPool2d( conv1, {windowDimensions, strides, layout, padding: computePadding2DForAutoPad( - /* nhwc */ [conv1.shape()[1], conv1.shape()[2]], + /* nhwc */ [conv1.shape[1], conv1.shape[2]], windowDimensions, strides, /* dilations */ undefined, 'same-upper')}); // Block 1 diff --git a/image_classification/squeezenet_nhwc.js b/image_classification/squeezenet_nhwc.js index f7e50417..83e4f04a 100644 --- a/image_classification/squeezenet_nhwc.js +++ b/image_classification/squeezenet_nhwc.js @@ -35,8 +35,8 @@ export class SqueezeNetNhwc { if (options.autoPad == 'same-upper') { options.padding = computePadding2DForAutoPad( - /* nwhc */[await input.shape()[1], await input.shape()[2]], - /* ohwi */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[await input.shape[1], await input.shape[2]], + /* ohwi */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, options.autoPad); } const conv2d = this.builder_.conv2d(await input, weights, options); diff --git a/nnotepad/js/nnotepad.js b/nnotepad/js/nnotepad.js index 38a57c26..88ca7978 100644 --- a/nnotepad/js/nnotepad.js +++ b/nnotepad/js/nnotepad.js @@ -37,16 +37,16 @@ const kArgTypeOperand = 3; class WebNNUtil { static bufferForOperand(operand) { - const size = [...operand.shape()].reduce((a, b) => a * b, 1); - const ctor = 
WebNNUtil.dataTypeToBufferType(operand.dataType()); + const size = [...operand.shape].reduce((a, b) => a * b, 1); + const ctor = WebNNUtil.dataTypeToBufferType(operand.dataType); return Reflect.construct(ctor, [size]); } static async tensorForOperand(operand, context) { const desc = { - dataType: operand.dataType(), - dimensions: operand.shape(), - shape: operand.shape(), + dataType: operand.dataType, + dimensions: operand.shape, + shape: operand.shape, usage: MLTensorUsage.READ, readable: true, }; @@ -613,9 +613,9 @@ export class NNotepad { return outputOperands.map( (op, index) => ({ - dataType: op.dataType(), - dimensions: op.shape(), - shape: op.shape(), + dataType: op.dataType, + dimensions: op.shape, + shape: op.shape, buffer: maybeProxyForFloat16Array(outputBuffers[`output-${index}`]), })); } diff --git a/nsnet2/nsnet2.js b/nsnet2/nsnet2.js index f22819b6..bb21e4e5 100644 --- a/nsnet2/nsnet2.js +++ b/nsnet2/nsnet2.js @@ -58,7 +58,7 @@ export class NSNet2 { const [gru94, gru93] = this.builder_.gru(transpose31, weight192, recurrentWeight193, frames, this.hiddenSize, {bias: bias194, recurrentBias: recurrentBias194, initialHiddenState: initialState92, returnSequence: true}); // Use reshape to implement squeeze(gru93, {axes: [1]}); - const squeeze95Shape = gru93.shape(); + const squeeze95Shape = gru93.shape; squeeze95Shape.splice(1, 1); const squeeze95 = this.builder_.reshape(gru93, squeeze95Shape); const initialState155 = this.builder_.input('initialState155', initialStateDesc); @@ -89,7 +89,7 @@ export class NSNet2 { const [gru157, gru156] = this.builder_.gru(squeeze95, weight212, recurrentWeight213, frames, this.hiddenSize, {bias: bias214, recurrentBias: recurrentBias214, initialHiddenState: initialState155, returnSequence: true}); // Use reshape to implement squeeze(gru156, {axes: [1]}); - const squeeze158Shape = gru156.shape(); + const squeeze158Shape = gru156.shape; squeeze158Shape.splice(1, 1); const squeeze158 = this.builder_.reshape(gru156, 
squeeze158Shape); const transpose159 = this.builder_.transpose(squeeze158, {permutation: [1, 0, 2]}); diff --git a/object_detection/ssd_mobilenetv1_nchw.js b/object_detection/ssd_mobilenetv1_nchw.js index e5e525da..4a98ac04 100644 --- a/object_detection/ssd_mobilenetv1_nchw.js +++ b/object_detection/ssd_mobilenetv1_nchw.js @@ -69,8 +69,8 @@ ${nameArray[1]}_BatchNorm_batchnorm`; const bias = await buildConstantByNpy( this.builder_, biasName, this.targetDataType_); options.padding = computePadding2DForAutoPad( - /* nchw */[input.shape()[2], input.shape()[3]], - /* oihw */[weights.shape()[2], weights.shape()[3]], + /* nchw */[input.shape[2], input.shape[3]], + /* oihw */[weights.shape[2], weights.shape[3]], options.strides, options.dilations, 'same-upper'); options.bias = bias; const conv2d = this.builder_.conv2d(input, weights, options); diff --git a/object_detection/ssd_mobilenetv1_nhwc.js b/object_detection/ssd_mobilenetv1_nhwc.js index 0d44feac..635cda01 100644 --- a/object_detection/ssd_mobilenetv1_nhwc.js +++ b/object_detection/ssd_mobilenetv1_nhwc.js @@ -76,8 +76,8 @@ ${nameArray[1]}_BatchNorm_batchnorm`; } options.bias = bias; options.padding = computePadding2DForAutoPad( - /* nhwc */[input.shape()[1], input.shape()[2]], - /* ohwi or ihwo */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[input.shape[1], input.shape[2]], + /* ohwi or ihwo */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, 'same-upper'); const conv2d = this.builder_.conv2d(input, weights, options); if (relu6) { diff --git a/object_detection/tiny_yolov2_nchw.js b/object_detection/tiny_yolov2_nchw.js index 0033212a..dc051ebd 100644 --- a/object_detection/tiny_yolov2_nchw.js +++ b/object_detection/tiny_yolov2_nchw.js @@ -38,8 +38,8 @@ export class TinyYoloV2Nchw { this.builder_, weightName, this.targetDataType_); const options = {autoPad: 'same-upper'}; options.padding = computePadding2DForAutoPad( - /* nchw */[input.shape()[2], input.shape()[3]], - /* oihw 
*/[weight.shape()[2], weight.shape()[3]], + /* nchw */[input.shape[2], input.shape[3]], + /* oihw */[weight.shape[2], weight.shape[3]], options.strides, options.dilations, 'same-upper'); options.bias = await buildConstantByNpy( this.builder_, biasName, this.targetDataType_); @@ -53,7 +53,7 @@ export class TinyYoloV2Nchw { buildMaxPool2d_(input, options) { options.padding = computePadding2DForAutoPad( - /* nchw */[input.shape()[2], input.shape()[3]], + /* nchw */[input.shape[2], input.shape[3]], options.windowDimensions, options.strides, options.dilations, 'same-upper'); return this.builder_.maxPool2d(input, options); diff --git a/object_detection/tiny_yolov2_nhwc.js b/object_detection/tiny_yolov2_nhwc.js index efeaf706..46bb3bfe 100644 --- a/object_detection/tiny_yolov2_nhwc.js +++ b/object_detection/tiny_yolov2_nhwc.js @@ -35,8 +35,8 @@ export class TinyYoloV2Nhwc { }; options.bias = bias; options.padding = computePadding2DForAutoPad( - /* nhwc */[input.shape()[1], input.shape()[2]], - /* ohwi */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[input.shape[1], input.shape[2]], + /* ohwi */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, 'same-upper'); let conv = this.builder_.conv2d(input, weights, options); if (leakyRelu) { @@ -48,7 +48,7 @@ export class TinyYoloV2Nhwc { buildMaxPool2d_(input, options) { options.padding = computePadding2DForAutoPad( - /* nhwc */[input.shape()[1], input.shape()[2]], + /* nhwc */[input.shape[1], input.shape[2]], options.windowDimensions, options.strides, options.dilations, 'same-upper'); return this.builder_.maxPool2d(input, options); diff --git a/semantic_segmentation/deeplabv3_mnv2_nhwc.js b/semantic_segmentation/deeplabv3_mnv2_nhwc.js index f4698a46..50e21857 100644 --- a/semantic_segmentation/deeplabv3_mnv2_nhwc.js +++ b/semantic_segmentation/deeplabv3_mnv2_nhwc.js @@ -45,8 +45,8 @@ export class DeepLabV3MNV2Nhwc { options.filterLayout = 'ohwi'; } options.padding = computePadding2DForAutoPad( - /* 
nhwc */[input.shape()[1], input.shape()[2]], - /* ohwi or ihwo */[weights.shape()[1], weights.shape()[2]], + /* nhwc */[input.shape[1], input.shape[2]], + /* ohwi or ihwo */[weights.shape[1], weights.shape[2]], options.strides, options.dilations, 'same-upper'); options.bias = bias; const conv2d = this.builder_.conv2d(input, weights, options); diff --git a/style_transfer/fast_style_transfer_net.js b/style_transfer/fast_style_transfer_net.js index 08737229..1071743c 100644 --- a/style_transfer/fast_style_transfer_net.js +++ b/style_transfer/fast_style_transfer_net.js @@ -26,8 +26,8 @@ export class FastStyleTransferNet { buildInstanceNormalization_(conv2D, variableMul, variableAdd) { if ('instanceNormalization' in this.builder_) { // Use reshape to implement squeeze(variableMul); and squeeze(variableAdd); - const mulShape = variableMul.shape().filter((dim) => dim !==1); - const addShape = variableAdd.shape().filter((dim) => dim !==1); + const mulShape = variableMul.shape.filter((dim) => dim !==1); + const addShape = variableAdd.shape.filter((dim) => dim !==1); const mulSqueeze = this.builder_.reshape(variableMul, mulShape); const addSqueeze = this.builder_.reshape(variableAdd, addShape); return this.builder_.instanceNormalization(conv2D, {scale: mulSqueeze, bias: addSqueeze});